/*
 * Copyright © 2014 Red Hat
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission.  The copyright holders make no representations
 * about the suitability of this software for any purpose.  It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/iopoll.h>

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stacktrace.h>
#include <linux/sort.h>
#include <linux/timekeeping.h>
#include <linux/math64.h>
#endif

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include "drm_crtc_helper_internal.h"
#include "drm_dp_mst_topology_internal.h"

/**
 * DOC: dp mst helper
 *
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband msgs.
 */
struct drm_dp_pending_up_req {
	struct drm_dp_sideband_msg_hdr hdr;
	struct drm_dp_sideband_msg_req_body msg;
	struct list_head next;
};

static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);

static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id,
				     struct drm_dp_payload *payload);

static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int offset, int size, u8 *bytes);
static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb);

static void
drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_branch *mstb);

static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);

#define DBG_PREFIX "[dp_mst]"

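/*
 * String lookup tables: DP_STR expands to a designated initializer keyed by
 * the protocol's numeric value, so any value without an entry falls through
 * to "unknown" in the helpers below.
 */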
#define DP_STR(x) [DP_ ## x] = #x

static const char *drm_dp_mst_req_type_str(u8 req_type)
{
	static const char * const req_type_str[] = {
		DP_STR(GET_MSG_TRANSACTION_VERSION),
		DP_STR(LINK_ADDRESS),
		DP_STR(CONNECTION_STATUS_NOTIFY),
		DP_STR(ENUM_PATH_RESOURCES),
		DP_STR(ALLOCATE_PAYLOAD),
		DP_STR(QUERY_PAYLOAD),
		DP_STR(RESOURCE_STATUS_NOTIFY),
		DP_STR(CLEAR_PAYLOAD_ID_TABLE),
		DP_STR(REMOTE_DPCD_READ),
		DP_STR(REMOTE_DPCD_WRITE),
		DP_STR(REMOTE_I2C_READ),
		DP_STR(REMOTE_I2C_WRITE),
		DP_STR(POWER_UP_PHY),
		DP_STR(POWER_DOWN_PHY),
		DP_STR(SINK_EVENT_NOTIFY),
		DP_STR(QUERY_STREAM_ENC_STATUS),
	};

	if (req_type >= ARRAY_SIZE(req_type_str) ||
	    !req_type_str[req_type])
		return "unknown";

	return req_type_str[req_type];
}

#undef DP_STR
#define DP_STR(x) [DP_NAK_ ## x] = #x

static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
{
	static const char * const nak_reason_str[] = {
		DP_STR(WRITE_FAILURE),
		DP_STR(INVALID_READ),
		DP_STR(CRC_FAILURE),
		DP_STR(BAD_PARAM),
		DP_STR(DEFER),
		DP_STR(LINK_FAILURE),
		DP_STR(NO_RESOURCES),
		DP_STR(DPCD_FAIL),
		DP_STR(I2C_NAK),
		DP_STR(ALLOCATE_FAIL),
	};

	if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
	    !nak_reason_str[nak_reason])
		return "unknown";

	return nak_reason_str[nak_reason];
}

#undef DP_STR
#define DP_STR(x) [DRM_DP_SIDEBAND_TX_ ## x] = #x

static const char *drm_dp_mst_sideband_tx_state_str(int state)
{
	static const char * const sideband_reason_str[] = {
		DP_STR(QUEUED),
		DP_STR(START_SEND),
		DP_STR(SENT),
		DP_STR(RX),
		DP_STR(TIMEOUT),
	};

	if (state >= ARRAY_SIZE(sideband_reason_str) ||
	    !sideband_reason_str[state])
		return "unknown";

	return sideband_reason_str[state];
}

static int
drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
{
	int i;
	u8 unpacked_rad[16];

	for (i = 0; i < lct; i++) {
		if (i % 2)
			unpacked_rad[i] = rad[i / 2] >> 4;
		else
			unpacked_rad[i] = rad[i / 2] & BIT_MASK(4);
	}

	/* TODO: Eventually add something to printk so we can format the rad
	 * like this: 1.2.3
	 */
	return snprintf(out, len, "%*phC", lct, unpacked_rad);
}

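/*
 * Bitwise CRC-4 over the header nibbles: the running remainder is reduced by
 * the polynomial x^4 + x + 1 (0x13) whenever bit 4 becomes set, then four
 * zero bits are pushed through to flush out the final remainder.
 */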
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = num_nibbles * 4;
	u8 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x10) == 0x10)
			remainder ^= 0x13;
	}

	number_of_bits = 4;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x10) != 0)
			remainder ^= 0x13;
	}

	return remainder;
}

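/*
 * Bitwise CRC-8 over the chunk payload: reducing by 0xd5 whenever bit 8 is
 * set is equivalent (modulo the high bits masked off at the end) to the
 * polynomial x^8 + x^7 + x^6 + x^4 + x^2 + 1, with eight flush bits pushed
 * through at the end.
 */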
static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = number_of_bytes * 8;
	u16 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x100) == 0x100)
			remainder ^= 0xd5;
	}

	number_of_bits = 8;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x100) != 0)
			remainder ^= 0xd5;
	}

	return remainder & 0xff;
}

static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
	u8 size = 3;

	size += (hdr->lct / 2);
	return size;
}

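/*
 * Pack a sideband MSG header: LCT/LCR nibbles first, one RAD byte per two
 * hops, then the broadcast/path_msg flags plus the 6-bit body length, and
 * finally somt/eomt/seqno with the header CRC-4 folded into the low nibble
 * of the last byte.
 */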
static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int *len)
{
	int idx = 0;
	int i;
	u8 crc4;

	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
		(hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);

	*len = idx;
}

static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int buflen, u8 *hdrlen)
{
	u8 crc4;
	u8 len;
	int i;
	u8 idx;

	if (buf[0] == 0)
		return false;
	len = 3;
	len += ((buf[0] & 0xf0) >> 4) / 2;
	if (len > buflen)
		return false;
	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
		return false;
	}

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	idx = 1;
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	idx++;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;
	idx++;
	*hdrlen = idx;
	return true;
}

void
drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
			   struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	int i;
	u8 *buf = raw->msg;

	buf[idx++] = req->req_type & 0x7f;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	case DP_ALLOCATE_PAYLOAD:
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
			(req->u.allocate_payload.number_sdp_streams & 0xf);
		idx++;
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
		idx++;
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
			idx++;
		}
		if (req->u.allocate_payload.number_sdp_streams & 1) {
			i = req->u.allocate_payload.number_sdp_streams - 1;
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
			idx++;
		}
		break;
	case DP_QUERY_PAYLOAD:
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
		idx++;
		break;
	case DP_REMOTE_DPCD_READ:
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_read.num_bytes);
		idx++;
		break;

	case DP_REMOTE_DPCD_WRITE:
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
		idx += req->u.dpcd_write.num_bytes;
		break;
	case DP_REMOTE_I2C_READ:
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
		idx++;
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
			idx++;
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
			idx++;
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
			idx += req->u.i2c_read.transactions[i].num_bytes;

			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
			idx++;
		}
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_read.num_bytes_read);
		idx++;
		break;

	case DP_REMOTE_I2C_WRITE:
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
		idx += req->u.i2c_write.num_bytes;
		break;
	case DP_QUERY_STREAM_ENC_STATUS: {
		const struct drm_dp_query_stream_enc_status *msg;

		msg = &req->u.enc_status;
		buf[idx] = msg->stream_id;
		idx++;
		memcpy(&buf[idx], msg->client_id, sizeof(msg->client_id));
		idx += sizeof(msg->client_id);
		buf[idx] = 0;
		buf[idx] |= FIELD_PREP(GENMASK(1, 0), msg->stream_event);
		buf[idx] |= msg->valid_stream_event ? BIT(2) : 0;
		buf[idx] |= FIELD_PREP(GENMASK(4, 3), msg->stream_behavior);
		buf[idx] |= msg->valid_stream_behavior ? BIT(5) : 0;
		idx++;
		}
		break;
	}
	raw->cur_len = idx;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_encode_sideband_req);

/* Decode a sideband request we've encoded, mainly used for debugging */
int
drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
			   struct drm_dp_sideband_msg_req_body *req)
{
	const u8 *buf = raw->msg;
	int i, idx = 0;

	req->req_type = buf[idx++] & 0x7f;
	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		req->u.port_num.port_number = (buf[idx] >> 4) & 0xf;
		break;
	case DP_ALLOCATE_PAYLOAD:
		{
			struct drm_dp_allocate_payload *a =
				&req->u.allocate_payload;

			a->number_sdp_streams = buf[idx] & 0xf;
			a->port_number = (buf[idx] >> 4) & 0xf;

			WARN_ON(buf[++idx] & 0x80);
			a->vcpi = buf[idx] & 0x7f;

			a->pbn = buf[++idx] << 8;
			a->pbn |= buf[++idx];

			idx++;
			for (i = 0; i < a->number_sdp_streams; i++) {
				a->sdp_stream_sink[i] =
					(buf[idx + (i / 2)] >> ((i % 2) ? 0 : 4)) & 0xf;
			}
		}
		break;
	case DP_QUERY_PAYLOAD:
		req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf;
		WARN_ON(buf[++idx] & 0x80);
		req->u.query_payload.vcpi = buf[idx] & 0x7f;
		break;
	case DP_REMOTE_DPCD_READ:
		{
			struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read;

			r->port_number = (buf[idx] >> 4) & 0xf;

			r->dpcd_address = (buf[idx] << 16) & 0xf0000;
			r->dpcd_address |= (buf[++idx] << 8) & 0xff00;
			r->dpcd_address |= buf[++idx] & 0xff;

			r->num_bytes = buf[++idx];
		}
		break;
	case DP_REMOTE_DPCD_WRITE:
		{
			struct drm_dp_remote_dpcd_write *w =
				&req->u.dpcd_write;

			w->port_number = (buf[idx] >> 4) & 0xf;

			w->dpcd_address = (buf[idx] << 16) & 0xf0000;
			w->dpcd_address |= (buf[++idx] << 8) & 0xff00;
			w->dpcd_address |= buf[++idx] & 0xff;

			w->num_bytes = buf[++idx];

			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
					   GFP_KERNEL);
			if (!w->bytes)
				return -ENOMEM;
		}
		break;
	case DP_REMOTE_I2C_READ:
		{
			struct drm_dp_remote_i2c_read *r = &req->u.i2c_read;
			struct drm_dp_remote_i2c_read_tx *tx;
			bool failed = false;

			r->num_transactions = buf[idx] & 0x3;
			r->port_number = (buf[idx] >> 4) & 0xf;
			for (i = 0; i < r->num_transactions; i++) {
				tx = &r->transactions[i];

				tx->i2c_dev_id = buf[++idx] & 0x7f;
				tx->num_bytes = buf[++idx];
				tx->bytes = kmemdup(&buf[++idx],
						    tx->num_bytes,
						    GFP_KERNEL);
				if (!tx->bytes) {
					failed = true;
					break;
				}
				idx += tx->num_bytes;
				tx->no_stop_bit = (buf[idx] >> 5) & 0x1;
				tx->i2c_transaction_delay = buf[idx] & 0xf;
			}

			if (failed) {
				for (i = 0; i < r->num_transactions; i++) {
					tx = &r->transactions[i];
					kfree(tx->bytes);
				}
				return -ENOMEM;
			}

			r->read_i2c_device_id = buf[++idx] & 0x7f;
			r->num_bytes_read = buf[++idx];
		}
		break;
	case DP_REMOTE_I2C_WRITE:
		{
			struct drm_dp_remote_i2c_write *w = &req->u.i2c_write;

			w->port_number = (buf[idx] >> 4) & 0xf;
			w->write_i2c_device_id = buf[++idx] & 0x7f;
			w->num_bytes = buf[++idx];
			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
					   GFP_KERNEL);
			if (!w->bytes)
				return -ENOMEM;
		}
		break;
	case DP_QUERY_STREAM_ENC_STATUS:
		req->u.enc_status.stream_id = buf[idx++];
		for (i = 0; i < sizeof(req->u.enc_status.client_id); i++)
			req->u.enc_status.client_id[i] = buf[idx++];

		req->u.enc_status.stream_event = FIELD_GET(GENMASK(1, 0),
							   buf[idx]);
		req->u.enc_status.valid_stream_event = FIELD_GET(BIT(2),
								 buf[idx]);
		req->u.enc_status.stream_behavior = FIELD_GET(GENMASK(4, 3),
							      buf[idx]);
		req->u.enc_status.valid_stream_behavior = FIELD_GET(BIT(5),
								    buf[idx]);
		break;
	}

	return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_decode_sideband_req);

void
drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
				  int indent, struct drm_printer *printer)
{
	int i;

#define P(f, ...) drm_printf_indent(printer, indent, f, ##__VA_ARGS__)
	if (req->req_type == DP_LINK_ADDRESS) {
		/* No contents to print */
		P("type=%s\n", drm_dp_mst_req_type_str(req->req_type));
		return;
	}

	P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type));
	indent++;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		P("port=%d\n", req->u.port_num.port_number);
		break;
	case DP_ALLOCATE_PAYLOAD:
		P("port=%d vcpi=%d pbn=%d sdp_streams=%d %*ph\n",
		  req->u.allocate_payload.port_number,
		  req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn,
		  req->u.allocate_payload.number_sdp_streams,
		  req->u.allocate_payload.number_sdp_streams,
		  req->u.allocate_payload.sdp_stream_sink);
		break;
	case DP_QUERY_PAYLOAD:
		P("port=%d vcpi=%d\n",
		  req->u.query_payload.port_number,
		  req->u.query_payload.vcpi);
		break;
	case DP_REMOTE_DPCD_READ:
		P("port=%d dpcd_addr=%05x len=%d\n",
		  req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address,
		  req->u.dpcd_read.num_bytes);
		break;
	case DP_REMOTE_DPCD_WRITE:
		P("port=%d addr=%05x len=%d: %*ph\n",
		  req->u.dpcd_write.port_number,
		  req->u.dpcd_write.dpcd_address,
		  req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes,
		  req->u.dpcd_write.bytes);
		break;
	case DP_REMOTE_I2C_READ:
		P("port=%d num_tx=%d id=%d size=%d:\n",
		  req->u.i2c_read.port_number,
		  req->u.i2c_read.num_transactions,
		  req->u.i2c_read.read_i2c_device_id,
		  req->u.i2c_read.num_bytes_read);

		indent++;
		for (i = 0; i < req->u.i2c_read.num_transactions; i++) {
			const struct drm_dp_remote_i2c_read_tx *rtx =
				&req->u.i2c_read.transactions[i];

			P("%d: id=%03d size=%03d no_stop_bit=%d tx_delay=%03d: %*ph\n",
			  i, rtx->i2c_dev_id, rtx->num_bytes,
			  rtx->no_stop_bit, rtx->i2c_transaction_delay,
			  rtx->num_bytes, rtx->bytes);
		}
		break;
	case DP_REMOTE_I2C_WRITE:
		P("port=%d id=%d size=%d: %*ph\n",
		  req->u.i2c_write.port_number,
		  req->u.i2c_write.write_i2c_device_id,
		  req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
		  req->u.i2c_write.bytes);
		break;
	case DP_QUERY_STREAM_ENC_STATUS:
		P("stream_id=%u client_id=%*ph stream_event=%x "
		  "valid_event=%d stream_behavior=%x valid_behavior=%d",
		  req->u.enc_status.stream_id,
		  (int)ARRAY_SIZE(req->u.enc_status.client_id),
		  req->u.enc_status.client_id, req->u.enc_status.stream_event,
		  req->u.enc_status.valid_stream_event,
		  req->u.enc_status.stream_behavior,
		  req->u.enc_status.valid_stream_behavior);
		break;
	default:
		P("???\n");
		break;
	}
#undef P
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_dump_sideband_msg_req_body);

static inline void
drm_dp_mst_dump_sideband_msg_tx(struct drm_printer *p,
				const struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_sideband_msg_req_body req;
	char buf[64];
	int ret;
	int i;

	drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf,
			      sizeof(buf));
	drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s\n",
		   txmsg->cur_offset, txmsg->cur_len, txmsg->seqno,
		   drm_dp_mst_sideband_tx_state_str(txmsg->state),
		   txmsg->path_msg, buf);

	ret = drm_dp_decode_sideband_req(txmsg, &req);
	if (ret) {
		drm_printf(p, "<failed to decode sideband req: %d>\n", ret);
		return;
	}
	drm_dp_dump_sideband_msg_req_body(&req, 1, p);

	switch (req.req_type) {
	case DP_REMOTE_DPCD_WRITE:
		kfree(req.u.dpcd_write.bytes);
		break;
	case DP_REMOTE_I2C_READ:
		for (i = 0; i < req.u.i2c_read.num_transactions; i++)
			kfree(req.u.i2c_read.transactions[i].bytes);
		break;
	case DP_REMOTE_I2C_WRITE:
		kfree(req.u.i2c_write.bytes);
		break;
	}
}

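/*
 * Append the data CRC byte directly after the chunk body; the caller must
 * leave one spare byte at msg[len] for it.
 */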
static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
{
	u8 crc4;

	crc4 = drm_dp_msg_data_crc4(msg, len);
	msg[len] = crc4;
}

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
					 struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	u8 *buf = raw->msg;

	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);

	raw->cur_len = idx;
}

static bool drm_dp_sideband_msg_set_header(struct drm_dp_sideband_msg_rx *msg,
					   struct drm_dp_sideband_msg_hdr *hdr,
					   u8 hdrlen)
{
	/*
	 * ignore out-of-order messages or messages that are part of a
	 * failed transaction
	 */
	if (!hdr->somt && !msg->have_somt)
		return false;

	/* get length contained in this portion */
	msg->curchunk_idx = 0;
	msg->curchunk_len = hdr->msg_len;
	msg->curchunk_hdrlen = hdrlen;

	/* we have already gotten an somt - don't bother parsing */
	if (hdr->somt && msg->have_somt)
		return false;

	if (hdr->somt) {
		memcpy(&msg->initial_hdr, hdr,
		       sizeof(struct drm_dp_sideband_msg_hdr));
		msg->have_somt = true;
	}
	if (hdr->eomt)
		msg->have_eomt = true;

	return true;
}

/* this adds a chunk of msg to the builder to get the final msg */
static bool drm_dp_sideband_append_payload(struct drm_dp_sideband_msg_rx *msg,
					   u8 *replybuf, u8 replybuflen)
{
	u8 crc4;

	memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
	msg->curchunk_idx += replybuflen;

	if (msg->curchunk_idx >= msg->curchunk_len) {
		/* do CRC */
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		if (crc4 != msg->chunk[msg->curchunk_len - 1])
			print_hex_dump(KERN_DEBUG, "wrong crc",
				       DUMP_PREFIX_NONE, 16, 1,
				       msg->chunk, msg->curchunk_len, false);
		/* copy chunk into bigger msg */
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
	}
	return true;
}

static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	int i;

	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
	idx += 16;
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
		if (raw->msg[idx] & 0x80)
			repmsg->u.link_addr.ports[i].input_port = 1;

		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);

		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
		if (repmsg->u.link_addr.ports[i].input_port == 0)
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
			idx++;
			if (idx > raw->curlen)
				goto fail_len;
			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
			idx += 16;
			if (idx > raw->curlen)
				goto fail_len;
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
			idx++;
		}
		if (idx > raw->curlen)
			goto fail_len;
	}

	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
						   struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote dpcd read reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
	idx++;

	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
	repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen) {
		DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
			      idx, raw->curlen);
		return false;
	}
	return true;
}

static bool
drm_dp_sideband_parse_query_stream_enc_status(
				struct drm_dp_sideband_msg_rx *raw,
				struct drm_dp_sideband_msg_reply_body *repmsg)
{
	struct drm_dp_query_stream_enc_status_ack_reply *reply;

	reply = &repmsg->u.enc_status;

	reply->stream_id = raw->msg[3];

	reply->reply_signed = raw->msg[2] & BIT(0);

	/*
	 * NOTE: It's my impression from reading the spec that the below parsing
	 * is correct. However I noticed while testing with an HDCP 1.4 display
	 * through an HDCP 2.2 hub that only bit 3 was set. In that case, I
	 * would expect both bits to be set. So keep the parsing following the
	 * spec, but beware reality might not match the spec (at least for some
	 * displays).
	 */
	reply->hdcp_1x_device_present = raw->msg[2] & BIT(4);
	reply->hdcp_2x_device_present = raw->msg[2] & BIT(3);

	reply->query_capable_device_present = raw->msg[2] & BIT(5);
	reply->legacy_device_present = raw->msg[2] & BIT(6);
	reply->unauthorizable_device_present = raw->msg[2] & BIT(7);

	reply->auth_completed = !!(raw->msg[1] & BIT(3));
	reply->encryption_enabled = !!(raw->msg[1] & BIT(4));
	reply->repeater_present = !!(raw->msg[1] & BIT(5));
	reply->state = (raw->msg[1] & GENMASK(7, 6)) >> 6;

	return true;
}

static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
					struct drm_dp_sideband_msg_reply_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
	msg->req_type = (raw->msg[0] & 0x7f);

	if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
		msg->u.nak.reason = raw->msg[17];
		msg->u.nak.nak_data = raw->msg[18];
		return false;
	}

	switch (msg->req_type) {
	case DP_LINK_ADDRESS:
		return drm_dp_sideband_parse_link_address(raw, msg);
	case DP_QUERY_PAYLOAD:
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
	case DP_REMOTE_DPCD_READ:
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
	case DP_REMOTE_DPCD_WRITE:
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
	case DP_REMOTE_I2C_READ:
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
	case DP_REMOTE_I2C_WRITE:
		return true; /* since there's nothing to parse */
	case DP_ENUM_PATH_RESOURCES:
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
	case DP_ALLOCATE_PAYLOAD:
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
	case DP_CLEAR_PAYLOAD_ID_TABLE:
		return true; /* since there's nothing to parse */
	case DP_QUERY_STREAM_ENC_STATUS:
		return drm_dp_sideband_parse_query_stream_enc_status(raw, msg);
	default:
		DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
			  drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
							   struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
							 struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
				      struct drm_dp_sideband_msg_req_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->req_type = (raw->msg[0] & 0x7f);

	switch (msg->req_type) {
	case DP_CONNECTION_STATUS_NOTIFY:
		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
	case DP_RESOURCE_STATUS_NOTIFY:
		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
	default:
		DRM_ERROR("Got unknown request 0x%02x (%s)\n", msg->req_type,
			  drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static void build_dpcd_write(struct drm_dp_sideband_msg_tx *msg,
			     u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_WRITE;
	req.u.dpcd_write.port_number = port_num;
	req.u.dpcd_write.dpcd_address = offset;
	req.u.dpcd_write.num_bytes = num_bytes;
	req.u.dpcd_write.bytes = bytes;
	drm_dp_encode_sideband_req(&req, msg);
}

static void build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_LINK_ADDRESS;
	drm_dp_encode_sideband_req(&req, msg);
}

static void build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
	drm_dp_encode_sideband_req(&req, msg);
}

static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
				     int port_num)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_ENUM_PATH_RESOURCES;
	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static void build_allocate_payload(struct drm_dp_sideband_msg_tx *msg,
				   int port_num,
				   u8 vcpi, uint16_t pbn,
				   u8 number_sdp_streams,
				   u8 *sdp_stream_sink)
{
	struct drm_dp_sideband_msg_req_body req;

	memset(&req, 0, sizeof(req));
	req.req_type = DP_ALLOCATE_PAYLOAD;
	req.u.allocate_payload.port_number = port_num;
	req.u.allocate_payload.vcpi = vcpi;
	req.u.allocate_payload.pbn = pbn;
	req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
	memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
	       number_sdp_streams);
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
}

static void build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
				   int port_num, bool power_up)
{
	struct drm_dp_sideband_msg_req_body req;

	if (power_up)
		req.req_type = DP_POWER_UP_PHY;
	else
		req.req_type = DP_POWER_DOWN_PHY;

	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
}

static int
build_query_stream_enc_status(struct drm_dp_sideband_msg_tx *msg, u8 stream_id,
			      u8 *q_id)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_QUERY_STREAM_ENC_STATUS;
	req.u.enc_status.stream_id = stream_id;
	memcpy(req.u.enc_status.client_id, q_id,
	       sizeof(req.u.enc_status.client_id));
	req.u.enc_status.stream_event = 0;
	req.u.enc_status.valid_stream_event = false;
	req.u.enc_status.stream_behavior = 0;
	req.u.enc_status.valid_stream_behavior = false;

	drm_dp_encode_sideband_req(&req, msg);
	return 0;
}

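/*
 * Payload ids and VCPIs come from two bitmasks guarded by payload_lock. Bit 0
 * of payload_mask is kept set (payload id 0 is never handed out), so the id
 * found below is 1-based and indexes proposed_vcpis[] at ret - 1; VCPIs are
 * tracked 0-based in vcpi_mask but exposed 1-based via vcpi_ret + 1.
 */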
static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_vcpi *vcpi)
{
	int ret, vcpi_ret;

	mutex_lock(&mgr->payload_lock);
	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
	if (ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of payload ids %d\n", ret);
		goto out_unlock;
	}

	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
	if (vcpi_ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
		goto out_unlock;
	}

	set_bit(ret, &mgr->payload_mask);
	set_bit(vcpi_ret, &mgr->vcpi_mask);
	vcpi->vcpi = vcpi_ret + 1;
	mgr->proposed_vcpis[ret - 1] = vcpi;
out_unlock:
	mutex_unlock(&mgr->payload_lock);
	return ret;
}

static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
				      int vcpi)
{
	int i;

	if (vcpi == 0)
		return;

	mutex_lock(&mgr->payload_lock);
	DRM_DEBUG_KMS("putting payload %d\n", vcpi);
	clear_bit(vcpi - 1, &mgr->vcpi_mask);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i] &&
		    mgr->proposed_vcpis[i]->vcpi == vcpi) {
			mgr->proposed_vcpis[i] = NULL;
			clear_bit(i + 1, &mgr->payload_mask);
		}
	}
	mutex_unlock(&mgr->payload_lock);
}

static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
{
	unsigned int state;

	/*
	 * All updates to txmsg->state are protected by mgr->qlock, and the
	 * two cases we check here are terminal states. For those the barriers
	 * provided by the wake_up/wait_event pair are enough.
	 */
	state = READ_ONCE(txmsg->state);
	return (state == DRM_DP_SIDEBAND_TX_RX ||
		state == DRM_DP_SIDEBAND_TX_TIMEOUT);
}

static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
				    struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	unsigned long wait_timeout = msecs_to_jiffies(4000);
	unsigned long wait_expires = jiffies + wait_timeout;
	int ret;

	for (;;) {
		/*
		 * If the driver provides a way for this, change to
		 * poll-waiting for the MST reply interrupt if we didn't receive
		 * it for 50 msec. This would cater for cases where the HPD
		 * pulse signal got lost somewhere, even though the sink raised
		 * the corresponding MST interrupt correctly. One example is the
		 * Club 3D CAC-1557 TypeC -> DP adapter which for some reason
		 * filters out the short pulses with a duration less than ~540 usec.
		 *
		 * The poll period is 50 msec to avoid missing an interrupt
		 * after the sink has cleared it (after a 110msec timeout
		 * since it raised the interrupt).
		 */
		ret = wait_event_timeout(mgr->tx_waitq,
					 check_txmsg_state(mgr, txmsg),
					 mgr->cbs->poll_hpd_irq ?
						msecs_to_jiffies(50) :
						wait_timeout);

		if (ret || !mgr->cbs->poll_hpd_irq ||
		    time_after(jiffies, wait_expires))
			break;

		mgr->cbs->poll_hpd_irq(mgr);
	}

	mutex_lock(&mgr->qlock);
	if (ret > 0) {
		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
			ret = -EIO;
			goto out;
		}
	} else {
		DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);

		/* dump some state */
		ret = -EIO;

		/* remove from q */
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
			list_del(&txmsg->next);
	}
out:
	if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
		struct drm_printer p = drm_debug_printer(DBG_PREFIX);

		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
	}
	mutex_unlock(&mgr->qlock);

	drm_dp_mst_kick_tx(mgr);
	return ret;
}

static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;

	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
	if (!mstb)
		return NULL;

	mstb->lct = lct;
	if (lct > 1)
		memcpy(mstb->rad, rad, lct / 2);
	INIT_LIST_HEAD(&mstb->ports);
	kref_init(&mstb->topology_kref);
	kref_init(&mstb->malloc_kref);
	return mstb;
}

static void drm_dp_free_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, malloc_kref);

	if (mstb->port_parent)
		drm_dp_mst_put_port_malloc(mstb->port_parent);

	kfree(mstb);
}

/**
 * DOC: Branch device and port refcounting
 *
 * Topology refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The refcounting schemes for &struct drm_dp_mst_branch and &struct
 * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have
 * two different kinds of refcounts: topology refcounts, and malloc refcounts.
 *
 * Topology refcounts are not exposed to drivers, and are handled internally
 * by the DP MST helpers. The helpers use them in order to prevent the
 * in-memory topology state from being changed in the middle of critical
 * operations like changing the internal state of payload allocations. This
 * means each branch and port will be considered to be connected to the rest
 * of the topology until its topology refcount reaches zero. Additionally,
 * for ports this means that their associated &struct drm_connector will stay
 * registered with userspace until the port's topology refcount reaches zero.
 *
 * Malloc refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Malloc references are used to keep a &struct drm_dp_mst_port or &struct
 * drm_dp_mst_branch allocated even after all of its topology references have
 * been dropped, so that the driver or MST helpers can safely access each
 * device's last known state before it was disconnected from the topology.
 * When the malloc refcount of a port or branch reaches zero, the memory
 * allocation containing the &struct drm_dp_mst_branch or &struct
 * drm_dp_mst_port respectively will be freed.
 *
 * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed
 * to drivers. Drivers only manage malloc refcounts for &struct
 * drm_dp_mst_port, through drm_dp_mst_get_port_malloc() and
 * drm_dp_mst_put_port_malloc().
 *
 * Refcount relationships in a topology
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Each port holds a malloc reference to the branch device it lives on, and
 * each branch device holds a malloc reference to the port it hangs off of.
 * This ensures that the last known state of a disconnected device chain can
 * still be walked safely until every reference is dropped.
 */

/**
 * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_put_mstb_malloc()
 */
static void
drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	kref_get(&mstb->malloc_kref);
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
}

/**
 * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_get_mstb_malloc()
 */
static void
drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
	kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
}

static void drm_dp_free_mst_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, malloc_kref);

	drm_dp_mst_put_mstb_malloc(port->parent);
	kfree(port);
}

/**
 * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
 * reaches 0, the memory allocation for @port will be released and @port may
 * no longer be used.
 *
 * Because @port could potentially be freed at any time by the DP MST helpers
 * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
 * function, drivers that wish to make use of &struct drm_dp_mst_port should
 * ensure that they grab at least one main malloc reference to their MST ports
 * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
 * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
 *
 * See also: drm_dp_mst_put_port_malloc()
 */
void
drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
{
	kref_get(&port->malloc_kref);
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref));
}
EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);

/**
 * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
 * reaches 0, the memory allocation for @port will be released and @port may
 * no longer be used.
 *
 * See also: drm_dp_mst_get_port_malloc()
 */
void
drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
{
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
	kref_put(&port->malloc_kref, drm_dp_free_mst_port);
}
EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)

#define STACK_DEPTH 8

static noinline void
__topology_ref_save(struct drm_dp_mst_topology_mgr *mgr,
		    struct drm_dp_mst_topology_ref_history *history,
		    enum drm_dp_mst_topology_ref_type type)
{
	struct drm_dp_mst_topology_ref_entry *entry = NULL;
	depot_stack_handle_t backtrace;
	ulong stack_entries[STACK_DEPTH];
	uint n;
	int i;

	n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1);
	backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL);
	if (!backtrace)
		return;

	/* Try to find an existing entry for this backtrace */
	for (i = 0; i < history->len; i++) {
		if (history->entries[i].backtrace == backtrace) {
			entry = &history->entries[i];
			break;
		}
	}

	/* Otherwise add one */
	if (!entry) {
		struct drm_dp_mst_topology_ref_entry *new;
		int new_len = history->len + 1;

		new = krealloc(history->entries, sizeof(*new) * new_len,
			       GFP_KERNEL);
		if (!new)
			return;

		entry = &new[history->len];
		history->len = new_len;
		history->entries = new;

		entry->backtrace = backtrace;
		entry->type = type;
		entry->count = 0;
	}
	entry->count++;
	entry->ts_nsec = ktime_get_ns();
}

static int
topology_ref_history_cmp(const void *a, const void *b)
{
	const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b;

	if (entry_a->ts_nsec > entry_b->ts_nsec)
		return 1;
	else if (entry_a->ts_nsec < entry_b->ts_nsec)
		return -1;
	else
		return 0;
}

static inline const char *
topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
{
	if (type == DRM_DP_MST_TOPOLOGY_REF_GET)
		return "get";
	else
		return "put";
}

static void
__dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history,
			    void *ptr, const char *type_str)
{
	struct drm_printer p = drm_debug_printer(DBG_PREFIX);
	char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	int i;

	if (!buf)
		return;

	if (!history->len)
		goto out;

	/* First, sort the list so that it goes from oldest to newest
	 * reference entry
	 */
	sort(history->entries, history->len, sizeof(*history->entries),
	     topology_ref_history_cmp, NULL);

	drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n",
		   type_str, ptr);

	for (i = 0; i < history->len; i++) {
		const struct drm_dp_mst_topology_ref_entry *entry =
			&history->entries[i];
		ulong *entries;
		uint nr_entries;
		u64 ts_nsec = entry->ts_nsec;
		u32 rem_nsec = do_div(ts_nsec, 1000000000);

		nr_entries = stack_depot_fetch(entry->backtrace, &entries);
		stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 4);

		drm_printf(&p, "  %d %ss (last at %5llu.%06u):\n%s",
			   entry->count,
			   topology_ref_type_to_str(entry->type),
			   ts_nsec, rem_nsec / 1000, buf);
	}

	/* Now free the history, since this is the only time we expose it */
	kfree(history->entries);
out:
	kfree(buf);
}

static __always_inline void
drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
{
	__dump_topology_ref_history(&mstb->topology_ref_history, mstb,
				    "MSTB");
}

static __always_inline void
drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
{
	__dump_topology_ref_history(&port->topology_ref_history, port,
				    "Port");
}

static __always_inline void
save_mstb_topology_ref(struct drm_dp_mst_branch *mstb,
		       enum drm_dp_mst_topology_ref_type type)
{
	__topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type);
}

static __always_inline void
save_port_topology_ref(struct drm_dp_mst_port *port,
		       enum drm_dp_mst_topology_ref_type type)
{
	__topology_ref_save(port->mgr, &port->topology_ref_history, type);
}

static inline void
topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_lock(&mgr->topology_ref_history_lock);
}

static inline void
topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_unlock(&mgr->topology_ref_history_lock);
}
#else
static inline void
topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {}
static inline void
topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {}
static inline void
drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {}
static inline void
drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
#define save_mstb_topology_ref(mstb, type)
#define save_port_topology_ref(port, type)
#endif

static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, topology_kref);
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;

	drm_dp_mst_dump_mstb_topology_history(mstb);

	INIT_LIST_HEAD(&mstb->destroy_next);

	/*
	 * This can get called under mgr->mutex, so we need to perform the
	 * actual destruction of the mstb in another worker
	 */
	mutex_lock(&mgr->delayed_destroy_lock);
	list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
	mutex_unlock(&mgr->delayed_destroy_lock);
	queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
}

/**
 * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
 * branch device unless it's zero
 * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Attempts to grab a topology reference to @mstb, if it hasn't yet been
 * removed from the topology (e.g. &drm_dp_mst_branch.topology_kref has
 * reached 0). Holding a topology reference implies that a malloc reference
 * will be held to @mstb as long as the user holds the topology reference.
 *
 * Care should be taken to ensure that the user has at least one malloc
 * reference to @mstb. If you already have a topology reference to @mstb, you
 * should use drm_dp_mst_topology_get_mstb() instead.
 *
 * See also:
 * drm_dp_mst_topology_get_mstb()
 * drm_dp_mst_topology_put_mstb()
 *
 * Returns:
 * * 1: A topology reference was grabbed successfully
 * * 0: @mstb is no longer in the topology, no reference was grabbed
 */
static int __must_check
drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
{
	int ret;

	topology_ref_history_lock(mstb->mgr);
	ret = kref_get_unless_zero(&mstb->topology_kref);
	if (ret) {
		DRM_DEBUG("mstb %p (%d)\n",
			  mstb, kref_read(&mstb->topology_kref));
		save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
	}

	topology_ref_history_unlock(mstb->mgr);

	return ret;
}

/**
 * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
 * branch device
 * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Increments &drm_dp_mst_branch.topology_kref without checking whether or not
 * it's already reached 0. This is only valid to use in scenarios where you
 * are already guaranteed to have at least one active topology reference to
 * @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
 *
 * See also:
 * drm_dp_mst_topology_try_get_mstb()
 * drm_dp_mst_topology_put_mstb()
 */
static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
{
	topology_ref_history_lock(mstb->mgr);

	save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
	WARN_ON(kref_read(&mstb->topology_kref) == 0);
	kref_get(&mstb->topology_kref);
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));

	topology_ref_history_unlock(mstb->mgr);
}

/**
 * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
 *
 * Releases a topology reference from @mstb by decrementing
 * &drm_dp_mst_branch.topology_kref.
 *
 * See also:
 * drm_dp_mst_topology_try_get_mstb()
 * drm_dp_mst_topology_get_mstb()
 */
static void
drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
{
	topology_ref_history_lock(mstb->mgr);

	DRM_DEBUG("mstb %p (%d)\n",
		  mstb, kref_read(&mstb->topology_kref) - 1);
	save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT);

	topology_ref_history_unlock(mstb->mgr);
	kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
}

static void drm_dp_destroy_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, topology_kref);
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;

	drm_dp_mst_dump_port_topology_history(port);

	/* There's nothing that needs locking to destroy an input port yet */
	if (port->input) {
		drm_dp_mst_put_port_malloc(port);
		return;
	}

	kfree(port->cached_edid);

	/*
	 * we can't destroy the connector here, as we might be holding the
	 * mode_config.mutex from an EDID retrieval
	 */
	mutex_lock(&mgr->delayed_destroy_lock);
	list_add(&port->next, &mgr->destroy_port_list);
	mutex_unlock(&mgr->delayed_destroy_lock);
	queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
}

/**
 * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
 * port unless it's zero
 * @port: &struct drm_dp_mst_port to increment the topology refcount of
 *
 * Attempts to grab a topology reference to @port, if it hasn't yet been
 * removed from the topology (e.g. &drm_dp_mst_port.topology_kref has reached
 * 0). Holding a topology reference implies that a malloc reference will be
 * held to @port as long as the user holds the topology reference.
 *
 * Care should be taken to ensure that the user has at least one malloc
 * reference to @port. If you already have a topology reference to @port, you
 * should use drm_dp_mst_topology_get_port() instead.
 *
 * See also:
 * drm_dp_mst_topology_get_port()
 * drm_dp_mst_topology_put_port()
 *
 * Returns:
 * * 1: A topology reference was grabbed successfully
 * * 0: @port is no longer in the topology, no reference was grabbed
 */
static int __must_check
drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
{
	int ret;

	topology_ref_history_lock(port->mgr);
	ret = kref_get_unless_zero(&port->topology_kref);
	if (ret) {
		DRM_DEBUG("port %p (%d)\n",
			  port, kref_read(&port->topology_kref));
		save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
	}

	topology_ref_history_unlock(port->mgr);
	return ret;
}

/**
 * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
 * @port: The &struct drm_dp_mst_port to increment the topology refcount of
 *
 * Increments &drm_dp_mst_port.topology_kref without checking whether or not
 * it's already reached 0. This is only valid to use in scenarios where you
 * are already guaranteed to have at least one active topology reference to
 * @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
 *
 * See also:
 * drm_dp_mst_topology_try_get_port()
 * drm_dp_mst_topology_put_port()
 */
static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
{
	topology_ref_history_lock(port->mgr);

	WARN_ON(kref_read(&port->topology_kref) == 0);
	kref_get(&port->topology_kref);
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
	save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);

	topology_ref_history_unlock(port->mgr);
}

/**
 * drm_dp_mst_topology_put_port() - release a topology reference to a port
 * @port: The &struct drm_dp_mst_port to release the topology reference from
 *
 * Releases a topology reference from @port by decrementing
 * &drm_dp_mst_port.topology_kref.
 *
 * See also:
 * drm_dp_mst_topology_try_get_port()
 * drm_dp_mst_topology_get_port()
 */
static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
{
	topology_ref_history_lock(port->mgr);

	DRM_DEBUG("port %p (%d)\n",
		  port, kref_read(&port->topology_kref) - 1);
	save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT);

	topology_ref_history_unlock(port->mgr);
	kref_put(&port->topology_kref, drm_dp_destroy_port);
}

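/*
 * The *_validated() helpers below take a caller-supplied mstb/port pointer
 * and walk the topology depth-first from the primary branch to prove it is
 * still connected, then grab a topology reference on it under mgr->lock so
 * it can't be torn down while in use.
 */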
static struct drm_dp_mst_branch *
drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
					      struct drm_dp_mst_branch *to_find)
{
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *rmstb;

	if (to_find == mstb)
		return mstb;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->mstb) {
			rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
			    port->mstb, to_find);
			if (rmstb)
				return rmstb;
		}
	}
	return NULL;
}

static struct drm_dp_mst_branch *
drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_branch *rmstb = NULL;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
		    mgr->mst_primary, mstb);

		if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
			rmstb = NULL;
	}
	mutex_unlock(&mgr->lock);
	return rmstb;
}

static struct drm_dp_mst_port *
drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
					      struct drm_dp_mst_port *to_find)
{
	struct drm_dp_mst_port *port, *mport;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port == to_find)
			return port;

		if (port->mstb) {
			mport = drm_dp_mst_topology_get_port_validated_locked(
			    port->mstb, to_find);
			if (mport)
				return mport;
		}
	}
	return NULL;
}

static struct drm_dp_mst_port *
drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *rport = NULL;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		rport = drm_dp_mst_topology_get_port_validated_locked(
		    mgr->mst_primary, port);

		if (rport && !drm_dp_mst_topology_try_get_port(rport))
			rport = NULL;
	}
	mutex_unlock(&mgr->lock);
	return rport;
}

static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
{
	struct drm_dp_mst_port *port;
	int ret;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->port_num == port_num) {
			ret = drm_dp_mst_topology_try_get_port(port);
			return ret ? port : NULL;
		}
	}

	return NULL;
}

/*
 * calculate a new RAD for this MST branch device
 * if parent has an LCT of 2 then it has 1 nibble of RAD,
 * if parent has an LCT of 3 then it has 2 nibbles of RAD,
 */
static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
			       u8 *rad)
{
	int parent_lct = port->parent->lct;
	int shift = 4;
	int idx = (parent_lct - 1) / 2;

	if (parent_lct > 1) {
		memcpy(rad, port->parent->rad, idx + 1);
		shift = (parent_lct % 2) ? 4 : 0;
	} else
		rad[0] = 0;

	rad[idx] |= port->port_num << shift;
	return parent_lct + 1;
}

static bool drm_dp_mst_is_end_device(u8 pdt, bool mcs)
{
	switch (pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		return true;
	case DP_PEER_DEVICE_MST_BRANCHING:
		/* For sst branch device */
		if (!mcs)
			return true;

		return false;
	}
	return true;
}

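/*
 * Apply a new peer device type to a port: tear down whatever the old pdt
 * owned (the remote i2c bus for end devices, or the child mstb for branching
 * devices) and set up the replacement. Returns 1 when a new branch device
 * was created and the caller needs to probe it with a link address request,
 * 0 otherwise, or a negative error code on failure.
 */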
static int
drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
		    bool new_mcs)
{
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	struct drm_dp_mst_branch *mstb;
	u8 rad[8], lct;
	int ret = 0;

	if (port->pdt == new_pdt && port->mcs == new_mcs)
		return 0;

	/* Teardown the old pdt, if there is one */
	if (port->pdt != DP_PEER_DEVICE_NONE) {
		if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
			/*
			 * If the new PDT would also have an i2c bus,
			 * don't bother with reregistering it
			 */
			if (new_pdt != DP_PEER_DEVICE_NONE &&
			    drm_dp_mst_is_end_device(new_pdt, new_mcs)) {
				port->pdt = new_pdt;
				port->mcs = new_mcs;
				return 0;
			}

			/* remove i2c over sideband */
			drm_dp_mst_unregister_i2c_bus(port);
		} else {
			mutex_lock(&mgr->lock);
			drm_dp_mst_topology_put_mstb(port->mstb);
			port->mstb = NULL;
			mutex_unlock(&mgr->lock);
		}
	}

	port->pdt = new_pdt;
	port->mcs = new_mcs;

	if (port->pdt != DP_PEER_DEVICE_NONE) {
		if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
			/* add i2c over sideband */
			ret = drm_dp_mst_register_i2c_bus(port);
		} else {
			lct = drm_dp_calculate_rad(port, rad);
			mstb = drm_dp_add_mst_branch_device(lct, rad);
			if (!mstb) {
				ret = -ENOMEM;
				DRM_ERROR("Failed to create MSTB for port %p",
					  port);
				goto out;
			}

			mutex_lock(&mgr->lock);
			port->mstb = mstb;
			mstb->mgr = port->mgr;
			mstb->port_parent = port;

			/*
			 * Make sure this port's memory allocation stays
			 * around until its child MSTB releases it
			 */
			drm_dp_mst_get_port_malloc(port);
			mutex_unlock(&mgr->lock);

			/* And make sure we send a link address for this */
			ret = 1;
		}
	}

out:
	if (ret < 0)
		port->pdt = DP_PEER_DEVICE_NONE;
	return ret;
}

/**
 * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
 * @aux: Fake sideband AUX CH
 * @offset: address of the (first) register to read
 * @buffer: buffer to store the register values
 * @size: number of bytes in @buffer
 *
 * Performs the same functionality for remote devices via
 * sideband messaging as drm_dp_dpcd_read() does for local
 * devices via actual AUX CH.
 *
 * Return: Number of bytes read, or negative error code on failure.
 */
ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
			     unsigned int offset, void *buffer, size_t size)
{
	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
						    aux);

	return drm_dp_send_dpcd_read(port->mgr, port,
				     offset, size, buffer);
}

/**
 * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband
 * @aux: Fake sideband AUX CH
 * @offset: address of the (first) register to write
 * @buffer: buffer containing the values to write
 * @size: number of bytes in @buffer
 *
 * Performs the same functionality for remote devices via
 * sideband messaging as drm_dp_dpcd_write() does for local
 * devices via actual AUX CH.
 *
 * Return: number of bytes written on success, negative error code on failure.
 */
ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
			      unsigned int offset, void *buffer, size_t size)
{
	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
						    aux);

	return drm_dp_send_dpcd_write(port->mgr, port,
				      offset, size, buffer);
}

static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
{
	int ret = 0;

	memcpy(mstb->guid, guid, 16);

	if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
		if (mstb->port_parent) {
			ret = drm_dp_send_dpcd_write(mstb->mgr,
						     mstb->port_parent,
						     DP_GUID, 16, mstb->guid);
		} else {
			ret = drm_dp_dpcd_write(mstb->mgr->aux,
						DP_GUID, mstb->guid, 16);
		}
	}

	if (ret < 16 && ret > 0)
		return -EPROTO;

	return ret == 16 ? 0 : ret;
}

static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
				int pnum,
				char *proppath,
				size_t proppath_size)
{
	int i;
	char temp[8];

	snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
	for (i = 0; i < (mstb->lct - 1); i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (mstb->rad[i / 2] >> shift) & 0xf;

		snprintf(temp, sizeof(temp), "-%d", port_num);
		strlcat(proppath, temp, proppath_size);
	}
	snprintf(temp, sizeof(temp), "-%d", pnum);
	strlcat(proppath, temp, proppath_size);
}

/**
 * drm_dp_mst_connector_late_register() - Late MST connector registration
 * @connector: The MST connector
 * @port: The MST port for this connector
 *
 * Helper to register the remote aux device for this MST port. Drivers should
 * call this from their mst connector's late_register hook to enable MST aux
 * devices.
 *
 * Return: 0 on success, negative error code on failure.
 */
int drm_dp_mst_connector_late_register(struct drm_connector *connector,
				       struct drm_dp_mst_port *port)
{
	DRM_DEBUG_KMS("registering %s remote bus for %s\n",
		      port->aux.name, connector->kdev->kobj.name);

	port->aux.dev = connector->kdev;
	return drm_dp_aux_register_devnode(&port->aux);
}
EXPORT_SYMBOL(drm_dp_mst_connector_late_register);
2270
/**
 * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
 * @connector: The MST connector
 * @port: The MST port for this connector
 *
 * Helper to unregister the remote aux device for this MST port, registered by
 * drm_dp_mst_connector_late_register(). Drivers should call this from their
 * mst connector's early_unregister hook.
 */
2280void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
2281 struct drm_dp_mst_port *port)
2282{
2283 DRM_DEBUG_KMS("unregistering %s remote bus for %s\n",
2284 port->aux.name, connector->kdev->kobj.name);
2285 drm_dp_aux_unregister_devnode(&port->aux);
2286}
2287EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
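
/*
 * Sketch of how a driver would typically wire the two helpers above into its
 * MST connector callbacks. "my_mst_connector" and to_my_mst_connector() are
 * hypothetical driver types, shown for illustration only:
 *
 *	static int my_mst_late_register(struct drm_connector *connector)
 *	{
 *		struct my_mst_connector *c = to_my_mst_connector(connector);
 *
 *		return drm_dp_mst_connector_late_register(connector, c->port);
 *	}
 *
 *	static void my_mst_early_unregister(struct drm_connector *connector)
 *	{
 *		struct my_mst_connector *c = to_my_mst_connector(connector);
 *
 *		drm_dp_mst_connector_early_unregister(connector, c->port);
 *	}
 */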
2288
2289static void
2290drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
2291 struct drm_dp_mst_port *port)
2292{
2293 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
2294 char proppath[255];
2295 int ret;
2296
2297 build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
2298 port->connector = mgr->cbs->add_connector(mgr, port, proppath);
2299 if (!port->connector) {
2300 ret = -ENOMEM;
2301 goto error;
2302 }
2303
2304 if (port->pdt != DP_PEER_DEVICE_NONE &&
2305 drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
2306 port->cached_edid = drm_get_edid(port->connector,
2307 &port->aux.ddc);
2308 drm_connector_set_tile_property(port->connector);
2309 }
2310
2311 drm_connector_register(port->connector);
2312 return;
2313
2314error:
2315 DRM_ERROR("Failed to create connector for port %p: %d\n", port, ret);
2316}
2317
/*
 * Drop a topology reference, and unlink the port from the in-memory topology
 * layout
 */
2322static void
2323drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,
2324 struct drm_dp_mst_port *port)
2325{
2326 mutex_lock(&mgr->lock);
2327 port->parent->num_ports--;
2328 list_del(&port->next);
2329 mutex_unlock(&mgr->lock);
2330 drm_dp_mst_topology_put_port(port);
2331}
2332
2333static struct drm_dp_mst_port *
2334drm_dp_mst_add_port(struct drm_device *dev,
2335 struct drm_dp_mst_topology_mgr *mgr,
2336 struct drm_dp_mst_branch *mstb, u8 port_number)
2337{
2338 struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL);
2339
2340 if (!port)
2341 return NULL;
2342
2343 kref_init(&port->topology_kref);
2344 kref_init(&port->malloc_kref);
2345 port->parent = mstb;
2346 port->port_num = port_number;
2347 port->mgr = mgr;
2348 port->aux.name = "DPMST";
2349 port->aux.dev = dev->dev;
2350 port->aux.is_remote = true;
2351
2352
2353 drm_dp_remote_aux_init(&port->aux);
2354
	/*
	 * Make sure the memory allocation for our parent branch stays
	 * around until our own memory allocation is released
	 */
2359 drm_dp_mst_get_mstb_malloc(mstb);
2360
2361 return port;
2362}
2363
2364static int
2365drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
2366 struct drm_device *dev,
2367 struct drm_dp_link_addr_reply_port *port_msg)
2368{
2369 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
2370 struct drm_dp_mst_port *port;
2371 int old_ddps = 0, ret;
2372 u8 new_pdt = DP_PEER_DEVICE_NONE;
	bool new_mcs = false;
2374 bool created = false, send_link_addr = false, changed = false;
2375
2376 port = drm_dp_get_port(mstb, port_msg->port_number);
2377 if (!port) {
2378 port = drm_dp_mst_add_port(dev, mgr, mstb,
2379 port_msg->port_number);
2380 if (!port)
2381 return -ENOMEM;
2382 created = true;
2383 changed = true;
2384 } else if (!port->input && port_msg->input_port && port->connector) {
		/* Since port->connector can't be changed here, we create a
		 * new port if input_port changes from 0 to 1
		 */
2388 drm_dp_mst_topology_unlink_port(mgr, port);
2389 drm_dp_mst_topology_put_port(port);
2390 port = drm_dp_mst_add_port(dev, mgr, mstb,
2391 port_msg->port_number);
2392 if (!port)
2393 return -ENOMEM;
2394 changed = true;
2395 created = true;
2396 } else if (port->input && !port_msg->input_port) {
2397 changed = true;
2398 } else if (port->connector) {
		/* We're updating a port that's exposed to userspace, so do it
		 * under lock
		 */
2402 drm_modeset_lock(&mgr->base.lock, NULL);
2403
2404 old_ddps = port->ddps;
2405 changed = port->ddps != port_msg->ddps ||
2406 (port->ddps &&
2407 (port->ldps != port_msg->legacy_device_plug_status ||
2408 port->dpcd_rev != port_msg->dpcd_revision ||
2409 port->mcs != port_msg->mcs ||
2410 port->pdt != port_msg->peer_device_type ||
2411 port->num_sdp_stream_sinks !=
2412 port_msg->num_sdp_stream_sinks));
2413 }
2414
2415 port->input = port_msg->input_port;
2416 if (!port->input)
2417 new_pdt = port_msg->peer_device_type;
2418 new_mcs = port_msg->mcs;
2419 port->ddps = port_msg->ddps;
2420 port->ldps = port_msg->legacy_device_plug_status;
2421 port->dpcd_rev = port_msg->dpcd_revision;
2422 port->num_sdp_streams = port_msg->num_sdp_streams;
2423 port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
2424
	/* manage mstb port lists with mgr lock - take a reference
	 * for this list
	 */
2427 if (created) {
2428 mutex_lock(&mgr->lock);
2429 drm_dp_mst_topology_get_port(port);
2430 list_add(&port->next, &mstb->ports);
2431 mstb->num_ports++;
2432 mutex_unlock(&mgr->lock);
2433 }
2434
	/*
	 * Reprobe PBN caps on both hotplug, and when re-probing the link
	 * for our parent mstb
	 */
2439 if (old_ddps != port->ddps || !created) {
2440 if (port->ddps && !port->input) {
2441 ret = drm_dp_send_enum_path_resources(mgr, mstb,
2442 port);
2443 if (ret == 1)
2444 changed = true;
2445 } else {
2446 port->full_pbn = 0;
2447 }
2448 }
2449
2450 ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
2451 if (ret == 1) {
2452 send_link_addr = true;
2453 } else if (ret < 0) {
2454 DRM_ERROR("Failed to change PDT on port %p: %d\n",
2455 port, ret);
2456 goto fail;
2457 }
2458
	/*
	 * If this port wasn't just created, then we're reprobing because
	 * we're coming out of suspend. In this case, always resend the link
	 * address if there's an MSTB on this port
	 */
2464 if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
2465 port->mcs)
2466 send_link_addr = true;
2467
2468 if (port->connector)
2469 drm_modeset_unlock(&mgr->base.lock);
2470 else if (!port->input)
2471 drm_dp_mst_port_add_connector(mstb, port);
2472
2473 if (send_link_addr && port->mstb) {
2474 ret = drm_dp_send_link_address(mgr, port->mstb);
2475 if (ret == 1)
2476 changed = true;
2477 else if (ret < 0)
2478 goto fail_put;
2479 }
2480
	/* put reference to this port */
2482 drm_dp_mst_topology_put_port(port);
2483 return changed;
2484
2485fail:
2486 drm_dp_mst_topology_unlink_port(mgr, port);
2487 if (port->connector)
2488 drm_modeset_unlock(&mgr->base.lock);
2489fail_put:
2490 drm_dp_mst_topology_put_port(port);
2491 return ret;
2492}
2493
2494static void
2495drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
2496 struct drm_dp_connection_status_notify *conn_stat)
2497{
2498 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
2499 struct drm_dp_mst_port *port;
2500 int old_ddps, old_input, ret, i;
2501 u8 new_pdt;
2502 bool new_mcs;
2503 bool dowork = false, create_connector = false;
2504
2505 port = drm_dp_get_port(mstb, conn_stat->port_number);
2506 if (!port)
2507 return;
2508
2509 if (port->connector) {
2510 if (!port->input && conn_stat->input_port) {
			/*
			 * We can't remove a connector from an already exposed
			 * port, so just throw the port out and make sure we
			 * reprobe the link address of its parent MSTB
			 */
2516 drm_dp_mst_topology_unlink_port(mgr, port);
2517 mstb->link_address_sent = false;
2518 dowork = true;
2519 goto out;
2520 }
2521
		/* Locking is only needed if the port's exposed to userspace */
2523 drm_modeset_lock(&mgr->base.lock, NULL);
2524 } else if (port->input && !conn_stat->input_port) {
2525 create_connector = true;
2526
2527 mstb->link_address_sent = false;
2528 dowork = true;
2529 }
2530
2531 old_ddps = port->ddps;
2532 old_input = port->input;
2533 port->input = conn_stat->input_port;
2534 port->ldps = conn_stat->legacy_device_plug_status;
2535 port->ddps = conn_stat->displayport_device_plug_status;
2536
2537 if (old_ddps != port->ddps) {
2538 if (port->ddps && !port->input)
2539 drm_dp_send_enum_path_resources(mgr, mstb, port);
2540 else
2541 port->full_pbn = 0;
2542 }
2543
2544 new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
2545 new_mcs = conn_stat->message_capability_status;
2546 ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
2547 if (ret == 1) {
2548 dowork = true;
2549 } else if (ret < 0) {
2550 DRM_ERROR("Failed to change PDT for port %p: %d\n",
2551 port, ret);
2552 dowork = false;
2553 }
2554
2555 if (!old_input && old_ddps != port->ddps && !port->ddps) {
2556 for (i = 0; i < mgr->max_payloads; i++) {
2557 struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
2558 struct drm_dp_mst_port *port_validated;
2559
2560 if (!vcpi)
2561 continue;
2562
2563 port_validated =
2564 container_of(vcpi, struct drm_dp_mst_port, vcpi);
2565 port_validated =
2566 drm_dp_mst_topology_get_port_validated(mgr, port_validated);
2567 if (!port_validated) {
2568 mutex_lock(&mgr->payload_lock);
2569 vcpi->num_slots = 0;
2570 mutex_unlock(&mgr->payload_lock);
2571 } else {
2572 drm_dp_mst_topology_put_port(port_validated);
2573 }
2574 }
2575 }
2576
2577 if (port->connector)
2578 drm_modeset_unlock(&mgr->base.lock);
2579 else if (create_connector)
2580 drm_dp_mst_port_add_connector(mstb, port);
2581
2582out:
2583 drm_dp_mst_topology_put_port(port);
2584 if (dowork)
2585 queue_work(system_long_wq, &mstb->mgr->work);
2586}
2587
2588static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
2589 u8 lct, u8 *rad)
2590{
2591 struct drm_dp_mst_branch *mstb;
2592 struct drm_dp_mst_port *port;
2593 int i, ret;
2594
2595
2596 mutex_lock(&mgr->lock);
2597 mstb = mgr->mst_primary;
2598
2599 if (!mstb)
2600 goto out;
2601
2602 for (i = 0; i < lct - 1; i++) {
2603 int shift = (i % 2) ? 0 : 4;
2604 int port_num = (rad[i / 2] >> shift) & 0xf;
2605
2606 list_for_each_entry(port, &mstb->ports, next) {
2607 if (port->port_num == port_num) {
2608 mstb = port->mstb;
2609 if (!mstb) {
2610 DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
2611 goto out;
2612 }
2613
2614 break;
2615 }
2616 }
2617 }
2618 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2619 if (!ret)
2620 mstb = NULL;
2621out:
2622 mutex_unlock(&mgr->lock);
2623 return mstb;
2624}
2625
2626static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
2627 struct drm_dp_mst_branch *mstb,
2628 const uint8_t *guid)
2629{
2630 struct drm_dp_mst_branch *found_mstb;
2631 struct drm_dp_mst_port *port;
2632
2633 if (memcmp(mstb->guid, guid, 16) == 0)
2634 return mstb;
2635
2636
2637 list_for_each_entry(port, &mstb->ports, next) {
2638 if (!port->mstb)
2639 continue;
2640
2641 found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
2642
2643 if (found_mstb)
2644 return found_mstb;
2645 }
2646
2647 return NULL;
2648}
2649
2650static struct drm_dp_mst_branch *
2651drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
2652 const uint8_t *guid)
2653{
2654 struct drm_dp_mst_branch *mstb;
2655 int ret;
2656
2657
2658 mutex_lock(&mgr->lock);
2659
2660 mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
2661 if (mstb) {
2662 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2663 if (!ret)
2664 mstb = NULL;
2665 }
2666
2667 mutex_unlock(&mgr->lock);
2668 return mstb;
2669}
2670
2671static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2672 struct drm_dp_mst_branch *mstb)
2673{
2674 struct drm_dp_mst_port *port;
2675 int ret;
2676 bool changed = false;
2677
2678 if (!mstb->link_address_sent) {
2679 ret = drm_dp_send_link_address(mgr, mstb);
2680 if (ret == 1)
2681 changed = true;
2682 else if (ret < 0)
2683 return ret;
2684 }
2685
2686 list_for_each_entry(port, &mstb->ports, next) {
2687 struct drm_dp_mst_branch *mstb_child = NULL;
2688
2689 if (port->input || !port->ddps)
2690 continue;
2691
2692 if (port->mstb)
2693 mstb_child = drm_dp_mst_topology_get_mstb_validated(
2694 mgr, port->mstb);
2695
2696 if (mstb_child) {
2697 ret = drm_dp_check_and_send_link_address(mgr,
2698 mstb_child);
2699 drm_dp_mst_topology_put_mstb(mstb_child);
2700 if (ret == 1)
2701 changed = true;
2702 else if (ret < 0)
2703 return ret;
2704 }
2705 }
2706
2707 return changed;
2708}
2709
2710static void drm_dp_mst_link_probe_work(struct work_struct *work)
2711{
2712 struct drm_dp_mst_topology_mgr *mgr =
2713 container_of(work, struct drm_dp_mst_topology_mgr, work);
2714 struct drm_device *dev = mgr->dev;
2715 struct drm_dp_mst_branch *mstb;
2716 int ret;
2717 bool clear_payload_id_table;
2718
2719 mutex_lock(&mgr->probe_lock);
2720
2721 mutex_lock(&mgr->lock);
2722 clear_payload_id_table = !mgr->payload_id_table_cleared;
2723 mgr->payload_id_table_cleared = true;
2724
2725 mstb = mgr->mst_primary;
2726 if (mstb) {
2727 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2728 if (!ret)
2729 mstb = NULL;
2730 }
2731 mutex_unlock(&mgr->lock);
2732 if (!mstb) {
2733 mutex_unlock(&mgr->probe_lock);
2734 return;
2735 }
2736
	/*
	 * Certain branch devices seem to incorrectly report an available_pbn
	 * of 0 on downstream sinks, even after clearing the
	 * DP_PAYLOAD_ALLOCATE_* registers in
	 * drm_dp_mst_topology_mgr_set_mst(). Namely, the CableMatters USB-C
	 * 2x DP hub. Sending a CLEAR_PAYLOAD_ID_TABLE message seems to make
	 * things work again.
	 */
2745 if (clear_payload_id_table) {
2746 DRM_DEBUG_KMS("Clearing payload ID table\n");
2747 drm_dp_send_clear_payload_id_table(mgr, mstb);
2748 }
2749
2750 ret = drm_dp_check_and_send_link_address(mgr, mstb);
2751 drm_dp_mst_topology_put_mstb(mstb);
2752
2753 mutex_unlock(&mgr->probe_lock);
2754 if (ret)
2755 drm_kms_helper_hotplug_event(dev);
2756}
2757
2758static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
2759 u8 *guid)
2760{
2761 u64 salt;
2762
2763 if (memchr_inv(guid, 0, 16))
2764 return true;
2765
2766 salt = get_jiffies_64();
2767
2768 memcpy(&guid[0], &salt, sizeof(u64));
2769 memcpy(&guid[8], &salt, sizeof(u64));
2770
2771 return false;
2772}
2773
2774static void build_dpcd_read(struct drm_dp_sideband_msg_tx *msg,
2775 u8 port_num, u32 offset, u8 num_bytes)
2776{
2777 struct drm_dp_sideband_msg_req_body req;
2778
2779 req.req_type = DP_REMOTE_DPCD_READ;
2780 req.u.dpcd_read.port_number = port_num;
2781 req.u.dpcd_read.dpcd_address = offset;
2782 req.u.dpcd_read.num_bytes = num_bytes;
2783 drm_dp_encode_sideband_req(&req, msg);
2784}
2785
2786static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
2787 bool up, u8 *msg, int len)
2788{
2789 int ret;
2790 int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
2791 int tosend, total, offset;
2792 int retries = 0;
2793
2794retry:
2795 total = len;
2796 offset = 0;
2797 do {
2798 tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
2799
2800 ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
2801 &msg[offset],
2802 tosend);
2803 if (ret != tosend) {
2804 if (ret == -EIO && retries < 5) {
2805 retries++;
2806 goto retry;
2807 }
2808 DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
2809
2810 return -EIO;
2811 }
2812 offset += tosend;
2813 total -= tosend;
2814 } while (total > 0);
2815 return 0;
2816}
2817
2818static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
2819 struct drm_dp_sideband_msg_tx *txmsg)
2820{
2821 struct drm_dp_mst_branch *mstb = txmsg->dst;
2822 u8 req_type;
2823
2824 req_type = txmsg->msg[0] & 0x7f;
2825 if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
2826 req_type == DP_RESOURCE_STATUS_NOTIFY)
2827 hdr->broadcast = 1;
2828 else
2829 hdr->broadcast = 0;
2830 hdr->path_msg = txmsg->path_msg;
2831 hdr->lct = mstb->lct;
2832 hdr->lcr = mstb->lct - 1;
2833 if (mstb->lct > 1)
2834 memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
2835
2836 return 0;
2837}
2838
2839
2840
2841static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
2842 struct drm_dp_sideband_msg_tx *txmsg,
2843 bool up)
2844{
2845 u8 chunk[48];
2846 struct drm_dp_sideband_msg_hdr hdr;
2847 int len, space, idx, tosend;
2848 int ret;
2849
2850 if (txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
2851 return 0;
2852
2853 memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
2854
2855 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED)
2856 txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
2857
2858
2859 ret = set_hdr_from_dst_qlock(&hdr, txmsg);
2860 if (ret < 0)
2861 return ret;
2862
2863
2864 len = txmsg->cur_len - txmsg->cur_offset;
2865
	/* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
2867 space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
2868
2869 tosend = min(len, space);
2870 if (len == txmsg->cur_len)
2871 hdr.somt = 1;
2872 if (space >= len)
2873 hdr.eomt = 1;
2874
	/* msg_len counts this chunk's body plus the trailing data CRC byte */
2876 hdr.msg_len = tosend + 1;
2877 drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
2878 memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
2879
2880 drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
2881 idx += tosend + 1;
2882
2883 ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
2884 if (unlikely(ret) && drm_debug_enabled(DRM_UT_DP)) {
2885 struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2886
2887 drm_printf(&p, "sideband msg failed to send\n");
2888 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2889 return ret;
2890 }
2891
2892 txmsg->cur_offset += tosend;
2893 if (txmsg->cur_offset == txmsg->cur_len) {
2894 txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
2895 return 1;
2896 }
2897 return 0;
2898}
2899
2900static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
2901{
2902 struct drm_dp_sideband_msg_tx *txmsg;
2903 int ret;
2904
2905 WARN_ON(!mutex_is_locked(&mgr->qlock));
2906
2907
2908 if (list_empty(&mgr->tx_msg_downq))
2909 return;
2910
2911 txmsg = list_first_entry(&mgr->tx_msg_downq,
2912 struct drm_dp_sideband_msg_tx, next);
2913 ret = process_single_tx_qlock(mgr, txmsg, false);
2914 if (ret < 0) {
2915 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
2916 list_del(&txmsg->next);
2917 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
2918 wake_up_all(&mgr->tx_waitq);
2919 }
2920}
2921
2922static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
2923 struct drm_dp_sideband_msg_tx *txmsg)
2924{
2925 mutex_lock(&mgr->qlock);
2926 list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
2927
2928 if (drm_debug_enabled(DRM_UT_DP)) {
2929 struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2930
2931 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2932 }
2933
2934 if (list_is_singular(&mgr->tx_msg_downq))
2935 process_single_down_tx_qlock(mgr);
2936 mutex_unlock(&mgr->qlock);
2937}
2938
2939static void
2940drm_dp_dump_link_address(struct drm_dp_link_address_ack_reply *reply)
2941{
2942 struct drm_dp_link_addr_reply_port *port_reply;
2943 int i;
2944
2945 for (i = 0; i < reply->nports; i++) {
2946 port_reply = &reply->ports[i];
2947 DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n",
2948 i,
2949 port_reply->input_port,
2950 port_reply->peer_device_type,
2951 port_reply->port_number,
2952 port_reply->dpcd_revision,
2953 port_reply->mcs,
2954 port_reply->ddps,
2955 port_reply->legacy_device_plug_status,
2956 port_reply->num_sdp_streams,
2957 port_reply->num_sdp_stream_sinks);
2958 }
2959}
2960
2961static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2962 struct drm_dp_mst_branch *mstb)
2963{
2964 struct drm_dp_sideband_msg_tx *txmsg;
2965 struct drm_dp_link_address_ack_reply *reply;
2966 struct drm_dp_mst_port *port, *tmp;
2967 int i, ret, port_mask = 0;
2968 bool changed = false;
2969
2970 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2971 if (!txmsg)
2972 return -ENOMEM;
2973
2974 txmsg->dst = mstb;
2975 build_link_address(txmsg);
2976
2977 mstb->link_address_sent = true;
2978 drm_dp_queue_down_tx(mgr, txmsg);
2979
	/* FIXME: Actually do some real error handling here */
2981 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2982 if (ret <= 0) {
2983 DRM_ERROR("Sending link address failed with %d\n", ret);
2984 goto out;
2985 }
2986 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2987 DRM_ERROR("link address NAK received\n");
2988 ret = -EIO;
2989 goto out;
2990 }
2991
2992 reply = &txmsg->reply.u.link_addr;
2993 DRM_DEBUG_KMS("link address reply: %d\n", reply->nports);
2994 drm_dp_dump_link_address(reply);
2995
2996 ret = drm_dp_check_mstb_guid(mstb, reply->guid);
2997 if (ret) {
2998 char buf[64];
2999
3000 drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, buf, sizeof(buf));
3001 DRM_ERROR("GUID check on %s failed: %d\n",
3002 buf, ret);
3003 goto out;
3004 }
3005
3006 for (i = 0; i < reply->nports; i++) {
3007 port_mask |= BIT(reply->ports[i].port_number);
3008 ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev,
3009 &reply->ports[i]);
3010 if (ret == 1)
3011 changed = true;
3012 else if (ret < 0)
3013 goto out;
3014 }
3015
	/* Prune any ports that are currently a part of mstb in our in-memory
	 * topology, but were not seen in this link address. Usually this
	 * means that they were removed while the topology was out of sync,
	 * e.g. during suspend/resume
	 */
3021 mutex_lock(&mgr->lock);
3022 list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
3023 if (port_mask & BIT(port->port_num))
3024 continue;
3025
3026 DRM_DEBUG_KMS("port %d was not in link address, removing\n",
3027 port->port_num);
3028 list_del(&port->next);
3029 drm_dp_mst_topology_put_port(port);
3030 changed = true;
3031 }
3032 mutex_unlock(&mgr->lock);
3033
3034out:
3035 if (ret <= 0)
3036 mstb->link_address_sent = false;
3037 kfree(txmsg);
3038 return ret < 0 ? ret : changed;
3039}
3040
3041static void
3042drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
3043 struct drm_dp_mst_branch *mstb)
3044{
3045 struct drm_dp_sideband_msg_tx *txmsg;
3046 int ret;
3047
3048 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3049 if (!txmsg)
3050 return;
3051
3052 txmsg->dst = mstb;
3053 build_clear_payload_id_table(txmsg);
3054
3055 drm_dp_queue_down_tx(mgr, txmsg);
3056
3057 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3058 if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3059 DRM_DEBUG_KMS("clear payload table id nak received\n");
3060
3061 kfree(txmsg);
3062}
3063
3064static int
3065drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
3066 struct drm_dp_mst_branch *mstb,
3067 struct drm_dp_mst_port *port)
3068{
3069 struct drm_dp_enum_path_resources_ack_reply *path_res;
3070 struct drm_dp_sideband_msg_tx *txmsg;
3071 int ret;
3072
3073 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3074 if (!txmsg)
3075 return -ENOMEM;
3076
3077 txmsg->dst = mstb;
3078 build_enum_path_resources(txmsg, port->port_num);
3079
3080 drm_dp_queue_down_tx(mgr, txmsg);
3081
3082 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3083 if (ret > 0) {
3084 ret = 0;
3085 path_res = &txmsg->reply.u.path_resources;
3086
3087 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3088 DRM_DEBUG_KMS("enum path resources nak received\n");
3089 } else {
3090 if (port->port_num != path_res->port_number)
3091 DRM_ERROR("got incorrect port in response\n");
3092
3093 DRM_DEBUG_KMS("enum path resources %d: %d %d\n",
3094 path_res->port_number,
3095 path_res->full_payload_bw_number,
3096 path_res->avail_payload_bw_number);
3097
			/*
			 * If something changed, make sure we send a
			 * hotplug
			 */
3102 if (port->full_pbn != path_res->full_payload_bw_number ||
3103 port->fec_capable != path_res->fec_capable)
3104 ret = 1;
3105
3106 port->full_pbn = path_res->full_payload_bw_number;
3107 port->fec_capable = path_res->fec_capable;
3108 }
3109 }
3110
3111 kfree(txmsg);
3112 return ret;
3113}
3114
3115static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
3116{
3117 if (!mstb->port_parent)
3118 return NULL;
3119
3120 if (mstb->port_parent->mstb != mstb)
3121 return mstb->port_parent;
3122
3123 return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
3124}
3125
/*
 * Searches upwards in the topology starting from mstb to try to find the
 * closest available parent of mstb that's still connected to the rest of the
 * topology. This can be used in order to perform operations like releasing
 * payloads, where the branch device which owned the payload may no longer be
 * around and thus would require that the payload on the last living relative
 * be freed instead.
 */
3134static struct drm_dp_mst_branch *
3135drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
3136 struct drm_dp_mst_branch *mstb,
3137 int *port_num)
3138{
3139 struct drm_dp_mst_branch *rmstb = NULL;
3140 struct drm_dp_mst_port *found_port;
3141
3142 mutex_lock(&mgr->lock);
3143 if (!mgr->mst_primary)
3144 goto out;
3145
3146 do {
3147 found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
3148 if (!found_port)
3149 break;
3150
3151 if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
3152 rmstb = found_port->parent;
3153 *port_num = found_port->port_num;
3154 } else {
3155
3156 mstb = found_port->parent;
3157 }
3158 } while (!rmstb);
3159out:
3160 mutex_unlock(&mgr->lock);
3161 return rmstb;
3162}
3163
3164static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
3165 struct drm_dp_mst_port *port,
3166 int id,
3167 int pbn)
3168{
3169 struct drm_dp_sideband_msg_tx *txmsg;
3170 struct drm_dp_mst_branch *mstb;
3171 int ret, port_num;
3172 u8 sinks[DRM_DP_MAX_SDP_STREAMS];
3173 int i;
3174
3175 port_num = port->port_num;
3176 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3177 if (!mstb) {
3178 mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
3179 port->parent,
3180 &port_num);
3181
3182 if (!mstb)
3183 return -EINVAL;
3184 }
3185
3186 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3187 if (!txmsg) {
3188 ret = -ENOMEM;
3189 goto fail_put;
3190 }
3191
3192 for (i = 0; i < port->num_sdp_streams; i++)
3193 sinks[i] = i;
3194
3195 txmsg->dst = mstb;
3196 build_allocate_payload(txmsg, port_num,
3197 id,
3198 pbn, port->num_sdp_streams, sinks);
3199
3200 drm_dp_queue_down_tx(mgr, txmsg);
3201
	/*
	 * FIXME: there is a small chance that between getting the last
	 * connected mstb and sending the payload message, the last connected
	 * mstb could also be removed from the topology. In the future, this
	 * needs to be fixed by restarting the
	 * drm_dp_get_last_connected_port_and_mstb() search in the event of a
	 * timeout if the topology is still connected to the system.
	 */
3210 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3211 if (ret > 0) {
3212 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3213 ret = -EINVAL;
3214 else
3215 ret = 0;
3216 }
3217 kfree(txmsg);
3218fail_put:
3219 drm_dp_mst_topology_put_mstb(mstb);
3220 return ret;
3221}
3222
3223int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
3224 struct drm_dp_mst_port *port, bool power_up)
3225{
3226 struct drm_dp_sideband_msg_tx *txmsg;
3227 int ret;
3228
3229 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3230 if (!port)
3231 return -EINVAL;
3232
3233 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3234 if (!txmsg) {
3235 drm_dp_mst_topology_put_port(port);
3236 return -ENOMEM;
3237 }
3238
3239 txmsg->dst = port->parent;
3240 build_power_updown_phy(txmsg, port->port_num, power_up);
3241 drm_dp_queue_down_tx(mgr, txmsg);
3242
3243 ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
3244 if (ret > 0) {
3245 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3246 ret = -EINVAL;
3247 else
3248 ret = 0;
3249 }
3250 kfree(txmsg);
3251 drm_dp_mst_topology_put_port(port);
3252
3253 return ret;
3254}
3255EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
3256
3257int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
3258 struct drm_dp_mst_port *port,
3259 struct drm_dp_query_stream_enc_status_ack_reply *status)
3260{
3261 struct drm_dp_sideband_msg_tx *txmsg;
3262 u8 nonce[7];
3263 int len, ret;
3264
3265 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3266 if (!txmsg)
3267 return -ENOMEM;
3268
3269 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3270 if (!port) {
3271 ret = -EINVAL;
3272 goto out_get_port;
3273 }
3274
3275 get_random_bytes(nonce, sizeof(nonce));
3276
	/*
	 * "Source device targets the QUERY_STREAM_ENCRYPTION_STATUS message
	 *  transaction at the MST Branch device directly connected to the
	 *  Source"
	 */
3282 txmsg->dst = mgr->mst_primary;
3283
3284 len = build_query_stream_enc_status(txmsg, port->vcpi.vcpi, nonce);
3285
3286 drm_dp_queue_down_tx(mgr, txmsg);
3287
3288 ret = drm_dp_mst_wait_tx_reply(mgr->mst_primary, txmsg);
3289 if (ret < 0) {
3290 goto out;
3291 } else if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3292 drm_dbg_kms(mgr->dev, "query encryption status nak received\n");
3293 ret = -ENXIO;
3294 goto out;
3295 }
3296
3297 ret = 0;
3298 memcpy(status, &txmsg->reply.u.enc_status, sizeof(*status));
3299
3300out:
3301 drm_dp_mst_topology_put_port(port);
3302out_get_port:
3303 kfree(txmsg);
3304 return ret;
3305}
3306EXPORT_SYMBOL(drm_dp_send_query_stream_enc_status);
3307
3308static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
3309 int id,
3310 struct drm_dp_payload *payload)
3311{
3312 int ret;
3313
3314 ret = drm_dp_dpcd_write_payload(mgr, id, payload);
3315 if (ret < 0) {
3316 payload->payload_state = 0;
3317 return ret;
3318 }
3319 payload->payload_state = DP_PAYLOAD_LOCAL;
3320 return 0;
3321}
3322
3323static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
3324 struct drm_dp_mst_port *port,
3325 int id,
3326 struct drm_dp_payload *payload)
3327{
3328 int ret;
3329
3330 ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
3331 if (ret < 0)
3332 return ret;
3333 payload->payload_state = DP_PAYLOAD_REMOTE;
3334 return ret;
3335}
3336
3337static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
3338 struct drm_dp_mst_port *port,
3339 int id,
3340 struct drm_dp_payload *payload)
3341{
	DRM_DEBUG_KMS("destroying payload %d\n", id);

	/* The port may already be gone; only send the release if it isn't */
	if (port)
		drm_dp_payload_send_msg(mgr, port, id, 0);
3347
3348 drm_dp_dpcd_write_payload(mgr, id, payload);
3349 payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
3350 return 0;
3351}
3352
3353static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
3354 int id,
3355 struct drm_dp_payload *payload)
3356{
3357 payload->payload_state = 0;
3358 return 0;
3359}
3360
/**
 * drm_dp_update_payload_part1() - Execute payload update part 1
 * @mgr: manager to use.
 *
 * This iterates over all proposed virtual channels, and tries to
 * allocate space in the link for them. For 0->slots transitions,
 * this step just writes the VCPI to the MST device. For slots->0
 * transitions, this writes the updated VCPIs and removes the
 * remote VC payloads.
 *
 * After calling this the driver should generate ACT and payload
 * packets.
 */
3374int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
3375{
3376 struct drm_dp_payload req_payload;
3377 struct drm_dp_mst_port *port;
3378 int i, j;
3379 int cur_slots = 1;
3380
3381 mutex_lock(&mgr->payload_lock);
3382 for (i = 0; i < mgr->max_payloads; i++) {
3383 struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
3384 struct drm_dp_payload *payload = &mgr->payloads[i];
3385 bool put_port = false;
3386
		/* solve the current payloads - compare to the hw ones
		 * - update the hw view
		 */
3389 req_payload.start_slot = cur_slots;
3390 if (vcpi) {
3391 port = container_of(vcpi, struct drm_dp_mst_port,
3392 vcpi);
3393
			/* Validated ports don't matter if we're releasing
			 * VCPI
			 */
3397 if (vcpi->num_slots) {
3398 port = drm_dp_mst_topology_get_port_validated(
3399 mgr, port);
3400 if (!port) {
3401 mutex_unlock(&mgr->payload_lock);
3402 return -EINVAL;
3403 }
3404 put_port = true;
3405 }
3406
3407 req_payload.num_slots = vcpi->num_slots;
3408 req_payload.vcpi = vcpi->vcpi;
3409 } else {
3410 port = NULL;
3411 req_payload.num_slots = 0;
3412 }
3413
3414 payload->start_slot = req_payload.start_slot;
3415
3416 if (payload->num_slots != req_payload.num_slots) {
3417
3418
3419 if (req_payload.num_slots) {
3420 drm_dp_create_payload_step1(mgr, vcpi->vcpi,
3421 &req_payload);
3422 payload->num_slots = req_payload.num_slots;
3423 payload->vcpi = req_payload.vcpi;
3424
3425 } else if (payload->num_slots) {
3426 payload->num_slots = 0;
3427 drm_dp_destroy_payload_step1(mgr, port,
3428 payload->vcpi,
3429 payload);
3430 req_payload.payload_state =
3431 payload->payload_state;
3432 payload->start_slot = 0;
3433 }
3434 payload->payload_state = req_payload.payload_state;
3435 }
3436 cur_slots += req_payload.num_slots;
3437
3438 if (put_port)
3439 drm_dp_mst_topology_put_port(port);
3440 }
3441
3442 for (i = 0; i < mgr->max_payloads; ) {
3443 if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) {
3444 i++;
3445 continue;
3446 }
3447
3448 DRM_DEBUG_KMS("removing payload %d\n", i);
3449 for (j = i; j < mgr->max_payloads - 1; j++) {
3450 mgr->payloads[j] = mgr->payloads[j + 1];
3451 mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
3452
3453 if (mgr->proposed_vcpis[j] &&
3454 mgr->proposed_vcpis[j]->num_slots) {
3455 set_bit(j + 1, &mgr->payload_mask);
3456 } else {
3457 clear_bit(j + 1, &mgr->payload_mask);
3458 }
3459 }
3460
3461 memset(&mgr->payloads[mgr->max_payloads - 1], 0,
3462 sizeof(struct drm_dp_payload));
3463 mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
3464 clear_bit(mgr->max_payloads, &mgr->payload_mask);
3465 }
3466 mutex_unlock(&mgr->payload_lock);
3467
3468 return 0;
3469}
3470EXPORT_SYMBOL(drm_dp_update_payload_part1);
3471
/**
 * drm_dp_update_payload_part2() - Execute payload update part 2
 * @mgr: manager to use.
 *
 * This iterates over all proposed virtual channels, and tries to
 * allocate space in the link for them. For 0->slots transitions,
 * this step writes the remote VC payload commands. For slots->0
 * this just resets some internal state.
 */
3481int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
3482{
3483 struct drm_dp_mst_port *port;
3484 int i;
3485 int ret = 0;
3486
3487 mutex_lock(&mgr->payload_lock);
3488 for (i = 0; i < mgr->max_payloads; i++) {
3489
3490 if (!mgr->proposed_vcpis[i])
3491 continue;
3492
3493 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
3494
3495 DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
3496 if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
3497 ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
3498 } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
3499 ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
3500 }
3501 if (ret) {
3502 mutex_unlock(&mgr->payload_lock);
3503 return ret;
3504 }
3505 }
3506 mutex_unlock(&mgr->payload_lock);
3507 return 0;
3508}
3509EXPORT_SYMBOL(drm_dp_update_payload_part2);
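
/*
 * A minimal sketch of the full payload update sequence as seen from a
 * driver's commit path, assuming the VCPIs were already allocated with
 * drm_dp_mst_allocate_vcpi() (error handling omitted for brevity):
 *
 *	drm_dp_update_payload_part1(mgr);
 *	... trigger the ACT sequence in the source's MST encoder ...
 *	drm_dp_check_act_status(mgr);
 *	drm_dp_update_payload_part2(mgr);
 */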
3510
3511static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
3512 struct drm_dp_mst_port *port,
3513 int offset, int size, u8 *bytes)
3514{
3515 int ret = 0;
3516 struct drm_dp_sideband_msg_tx *txmsg;
3517 struct drm_dp_mst_branch *mstb;
3518
3519 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3520 if (!mstb)
3521 return -EINVAL;
3522
3523 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3524 if (!txmsg) {
3525 ret = -ENOMEM;
3526 goto fail_put;
3527 }
3528
3529 build_dpcd_read(txmsg, port->port_num, offset, size);
3530 txmsg->dst = port->parent;
3531
3532 drm_dp_queue_down_tx(mgr, txmsg);
3533
3534 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3535 if (ret < 0)
3536 goto fail_free;
3537
	/* DPCD read should never be NAKed */
	if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3540 DRM_ERROR("mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",
3541 mstb, port->port_num, offset, size);
3542 ret = -EIO;
3543 goto fail_free;
3544 }
3545
3546 if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) {
3547 ret = -EPROTO;
3548 goto fail_free;
3549 }
3550
3551 ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes,
3552 size);
3553 memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret);
3554
3555fail_free:
3556 kfree(txmsg);
3557fail_put:
3558 drm_dp_mst_topology_put_mstb(mstb);
3559
3560 return ret;
3561}
3562
3563static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
3564 struct drm_dp_mst_port *port,
3565 int offset, int size, u8 *bytes)
3566{
3567 int ret;
3568 struct drm_dp_sideband_msg_tx *txmsg;
3569 struct drm_dp_mst_branch *mstb;
3570
3571 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3572 if (!mstb)
3573 return -EINVAL;
3574
3575 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3576 if (!txmsg) {
3577 ret = -ENOMEM;
3578 goto fail_put;
3579 }
3580
3581 build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
3582 txmsg->dst = mstb;
3583
3584 drm_dp_queue_down_tx(mgr, txmsg);
3585
3586 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3587 if (ret > 0) {
3588 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3589 ret = -EIO;
3590 else
3591 ret = size;
3592 }
3593
3594 kfree(txmsg);
3595fail_put:
3596 drm_dp_mst_topology_put_mstb(mstb);
3597 return ret;
3598}
3599
3600static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
3601{
3602 struct drm_dp_sideband_msg_reply_body reply;
3603
3604 reply.reply_type = DP_SIDEBAND_REPLY_ACK;
3605 reply.req_type = req_type;
3606 drm_dp_encode_sideband_reply(&reply, msg);
3607 return 0;
3608}
3609
3610static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
3611 struct drm_dp_mst_branch *mstb,
3612 int req_type, bool broadcast)
3613{
3614 struct drm_dp_sideband_msg_tx *txmsg;
3615
3616 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3617 if (!txmsg)
3618 return -ENOMEM;
3619
3620 txmsg->dst = mstb;
3621 drm_dp_encode_up_ack_reply(txmsg, req_type);
3622
3623 mutex_lock(&mgr->qlock);
3624
3625 process_single_tx_qlock(mgr, txmsg, true);
3626 mutex_unlock(&mgr->qlock);
3627
3628 kfree(txmsg);
3629 return 0;
3630}
3631
3632static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count)
3633{
3634 if (dp_link_bw == 0 || dp_link_count == 0)
3635 DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
3636 dp_link_bw, dp_link_count);
3637
3638 return dp_link_bw * dp_link_count / 2;
3639}
3640
/**
 * drm_dp_read_mst_cap() - check whether or not a sink supports MST
 * @aux: The DP AUX channel to use
 * @dpcd: A cached copy of the DPCD capabilities for this sink
 *
 * Returns: %true if the sink supports MST, %false otherwise
 */
3648bool drm_dp_read_mst_cap(struct drm_dp_aux *aux,
3649 const u8 dpcd[DP_RECEIVER_CAP_SIZE])
3650{
3651 u8 mstm_cap;
3652
3653 if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_12)
3654 return false;
3655
3656 if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) != 1)
3657 return false;
3658
3659 return mstm_cap & DP_MST_CAP;
3660}
3661EXPORT_SYMBOL(drm_dp_read_mst_cap);
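
/*
 * Illustrative detection sequence, assuming @dpcd holds a cached copy of the
 * sink's receiver capabilities and "was_mst" is the driver's previous state
 * (a sketch, not a complete detect hook):
 *
 *	bool is_mst = drm_dp_read_mst_cap(aux, dpcd);
 *
 *	if (is_mst != was_mst)
 *		drm_dp_mst_topology_mgr_set_mst(mgr, is_mst);
 */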
3662
/**
 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
 * @mgr: manager to set state for
 * @mst_state: true to enable MST on this connector - false to disable.
 *
 * This is called by the driver when it detects an MST capable device plugged
 * into a DP MST capable port, or when a DP MST capable device is unplugged.
 */
3671int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
3672{
3673 int ret = 0;
3674 struct drm_dp_mst_branch *mstb = NULL;
3675
3676 mutex_lock(&mgr->payload_lock);
3677 mutex_lock(&mgr->lock);
3678 if (mst_state == mgr->mst_state)
3679 goto out_unlock;
3680
3681 mgr->mst_state = mst_state;
3682
3683 if (mst_state) {
3684 struct drm_dp_payload reset_pay;
3685
3686 WARN_ON(mgr->mst_primary);
3687
3688
3689 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
3690 if (ret != DP_RECEIVER_CAP_SIZE) {
3691 DRM_DEBUG_KMS("failed to read DPCD\n");
3692 goto out_unlock;
3693 }
3694
3695 mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1],
3696 mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
3697 if (mgr->pbn_div == 0) {
3698 ret = -EINVAL;
3699 goto out_unlock;
3700 }
3701
3702
3703 mstb = drm_dp_add_mst_branch_device(1, NULL);
3704 if (mstb == NULL) {
3705 ret = -ENOMEM;
3706 goto out_unlock;
3707 }
3708 mstb->mgr = mgr;
3709
3710
3711 mgr->mst_primary = mstb;
3712 drm_dp_mst_topology_get_mstb(mgr->mst_primary);
3713
3714 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3715 DP_MST_EN |
3716 DP_UP_REQ_EN |
3717 DP_UPSTREAM_IS_SRC);
3718 if (ret < 0)
3719 goto out_unlock;
3720
3721 reset_pay.start_slot = 0;
3722 reset_pay.num_slots = 0x3f;
3723 drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
3724
3725 queue_work(system_long_wq, &mgr->work);
3726
3727 ret = 0;
3728 } else {
3729
3730 mstb = mgr->mst_primary;
3731 mgr->mst_primary = NULL;
3732
3733 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
3734 ret = 0;
3735 memset(mgr->payloads, 0,
3736 mgr->max_payloads * sizeof(mgr->payloads[0]));
3737 memset(mgr->proposed_vcpis, 0,
3738 mgr->max_payloads * sizeof(mgr->proposed_vcpis[0]));
3739 mgr->payload_mask = 0;
3740 set_bit(0, &mgr->payload_mask);
3741 mgr->vcpi_mask = 0;
3742 mgr->payload_id_table_cleared = false;
3743 }
3744
3745out_unlock:
3746 mutex_unlock(&mgr->lock);
3747 mutex_unlock(&mgr->payload_lock);
3748 if (mstb)
3749 drm_dp_mst_topology_put_mstb(mstb);
3750 return ret;
3751
3752}
3753EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
3754
3755static void
3756drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
3757{
3758 struct drm_dp_mst_port *port;
3759
3760
3761 mstb->link_address_sent = false;
3762
3763 list_for_each_entry(port, &mstb->ports, next)
3764 if (port->mstb)
3765 drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
3766}
3767
/**
 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
 * @mgr: manager to suspend
 *
 * This function tells the MST device that we can't handle UP messages
 * anymore. This should stop it from sending any since we are suspended.
 */
3775void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
3776{
3777 mutex_lock(&mgr->lock);
3778 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3779 DP_MST_EN | DP_UPSTREAM_IS_SRC);
3780 mutex_unlock(&mgr->lock);
3781 flush_work(&mgr->up_req_work);
3782 flush_work(&mgr->work);
3783 flush_work(&mgr->delayed_destroy_work);
3784
3785 mutex_lock(&mgr->lock);
3786 if (mgr->mst_state && mgr->mst_primary)
3787 drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
3788 mutex_unlock(&mgr->lock);
3789}
3790EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
3791
/**
 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
 * @mgr: manager to resume
 * @sync: whether or not to perform topology reprobing synchronously
 *
 * This will fetch DPCD and see if the device is still there,
 * if it is, it will rewrite the MSTM control bits, and return.
 *
 * If the device fails this returns -1, and the driver should do
 * a full MST reprobe, in case we were undocked.
 *
 * During system resume (where it is assumed that the driver will be calling
 * drm_atomic_helper_resume()) this function should be called beforehand with
 * @sync set to false. In cases where the driver is not resuming via
 * drm_atomic_helper_resume(), e.g. runtime resume, it should be called with
 * @sync set to true in order to ensure that any outstanding hotplugs are
 * flushed before returning.
 *
 * Returns: 0 on success, -1 if the MST topology has gone away.
 */
3812int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
3813 bool sync)
3814{
3815 int ret;
3816 u8 guid[16];
3817
3818 mutex_lock(&mgr->lock);
3819 if (!mgr->mst_primary)
3820 goto out_fail;
3821
3822 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd,
3823 DP_RECEIVER_CAP_SIZE);
3824 if (ret != DP_RECEIVER_CAP_SIZE) {
3825 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
3826 goto out_fail;
3827 }
3828
3829 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3830 DP_MST_EN |
3831 DP_UP_REQ_EN |
3832 DP_UPSTREAM_IS_SRC);
3833 if (ret < 0) {
3834 DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
3835 goto out_fail;
3836 }
3837
	/* Some hubs forget their guids after they resume */
3839 ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
3840 if (ret != 16) {
3841 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
3842 goto out_fail;
3843 }
3844
3845 ret = drm_dp_check_mstb_guid(mgr->mst_primary, guid);
3846 if (ret) {
3847 DRM_DEBUG_KMS("check mstb failed - undocked during suspend?\n");
3848 goto out_fail;
3849 }
3850
	/*
	 * For the final step of resuming the topology, we need to bring the
	 * state of our in-memory topology back into sync with reality. So,
	 * restart the probing process as if we're probing a new hub
	 */
3856 queue_work(system_long_wq, &mgr->work);
3857 mutex_unlock(&mgr->lock);
3858
3859 if (sync) {
3860 DRM_DEBUG_KMS("Waiting for link probe work to finish re-syncing topology...\n");
3861 flush_work(&mgr->work);
3862 }
3863
3864 return 0;
3865
3866out_fail:
3867 mutex_unlock(&mgr->lock);
3868 return -1;
3869}
3870EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
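
/*
 * Sketch of a driver's system resume path, assuming the driver resumes via
 * drm_atomic_helper_resume() and hence passes sync == false:
 *
 *	if (drm_dp_mst_topology_mgr_resume(mgr, false) < 0) {
 *		// Topology went away while suspended, tear down MST
 *		drm_dp_mst_topology_mgr_set_mst(mgr, false);
 *	}
 */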
3871
3872static bool
3873drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
3874 struct drm_dp_mst_branch **mstb)
3875{
3876 int len;
3877 u8 replyblock[32];
3878 int replylen, curreply;
3879 int ret;
3880 u8 hdrlen;
3881 struct drm_dp_sideband_msg_hdr hdr;
3882 struct drm_dp_sideband_msg_rx *msg =
3883 up ? &mgr->up_req_recv : &mgr->down_rep_recv;
3884 int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE :
3885 DP_SIDEBAND_MSG_DOWN_REP_BASE;
3886
3887 if (!up)
3888 *mstb = NULL;
3889
3890 len = min(mgr->max_dpcd_transaction_bytes, 16);
3891 ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len);
3892 if (ret != len) {
3893 DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
3894 return false;
3895 }
3896
3897 ret = drm_dp_decode_sideband_msg_hdr(&hdr, replyblock, len, &hdrlen);
	if (!ret) {
3899 print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16,
3900 1, replyblock, len, false);
3901 DRM_DEBUG_KMS("ERROR: failed header\n");
3902 return false;
3903 }
3904
3905 if (!up) {
		/* Caller is responsible for giving back this reference */
3907 *mstb = drm_dp_get_mst_branch_device(mgr, hdr.lct, hdr.rad);
3908 if (!*mstb) {
3909 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
3910 hdr.lct);
3911 return false;
3912 }
3913 }
3914
3915 if (!drm_dp_sideband_msg_set_header(msg, &hdr, hdrlen)) {
3916 DRM_DEBUG_KMS("sideband msg set header failed %d\n",
3917 replyblock[0]);
3918 return false;
3919 }
3920
3921 replylen = min(msg->curchunk_len, (u8)(len - hdrlen));
3922 ret = drm_dp_sideband_append_payload(msg, replyblock + hdrlen, replylen);
3923 if (!ret) {
3924 DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
3925 return false;
3926 }
3927
3928 replylen = msg->curchunk_len + msg->curchunk_hdrlen - len;
3929 curreply = len;
3930 while (replylen > 0) {
3931 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
3932 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
3933 replyblock, len);
3934 if (ret != len) {
3935 DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
3936 len, ret);
3937 return false;
3938 }
3939
3940 ret = drm_dp_sideband_append_payload(msg, replyblock, len);
3941 if (!ret) {
3942 DRM_DEBUG_KMS("failed to build sideband msg\n");
3943 return false;
3944 }
3945
3946 curreply += len;
3947 replylen -= len;
3948 }
3949 return true;
3950}
3951
3952static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
3953{
3954 struct drm_dp_sideband_msg_tx *txmsg;
3955 struct drm_dp_mst_branch *mstb = NULL;
3956 struct drm_dp_sideband_msg_rx *msg = &mgr->down_rep_recv;
3957
3958 if (!drm_dp_get_one_sb_msg(mgr, false, &mstb))
3959 goto out;
3960
	/* Multi-packet message transmission, don't clear the reply */
3962 if (!msg->have_eomt)
3963 goto out;
3964
3965
3966 mutex_lock(&mgr->qlock);
3967 txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
3968 struct drm_dp_sideband_msg_tx, next);
3969 mutex_unlock(&mgr->qlock);
3970
	/* Were we actually expecting a response, and from this mstb? */
3972 if (!txmsg || txmsg->dst != mstb) {
3973 struct drm_dp_sideband_msg_hdr *hdr;
3974
3975 hdr = &msg->initial_hdr;
3976 DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
3977 mstb, hdr->seqno, hdr->lct, hdr->rad[0],
3978 msg->msg[0]);
3979 goto out_clear_reply;
3980 }
3981
3982 drm_dp_sideband_parse_reply(msg, &txmsg->reply);
3983
3984 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3985 DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
3986 txmsg->reply.req_type,
3987 drm_dp_mst_req_type_str(txmsg->reply.req_type),
3988 txmsg->reply.u.nak.reason,
3989 drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
3990 txmsg->reply.u.nak.nak_data);
3991 }
3992
3993 memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
3994 drm_dp_mst_topology_put_mstb(mstb);
3995
3996 mutex_lock(&mgr->qlock);
3997 txmsg->state = DRM_DP_SIDEBAND_TX_RX;
3998 list_del(&txmsg->next);
3999 mutex_unlock(&mgr->qlock);
4000
4001 wake_up_all(&mgr->tx_waitq);
4002
4003 return 0;
4004
4005out_clear_reply:
4006 memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
4007out:
4008 if (mstb)
4009 drm_dp_mst_topology_put_mstb(mstb);
4010
4011 return 0;
4012}
4013
4014static inline bool
4015drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
4016 struct drm_dp_pending_up_req *up_req)
4017{
4018 struct drm_dp_mst_branch *mstb = NULL;
4019 struct drm_dp_sideband_msg_req_body *msg = &up_req->msg;
4020 struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr;
4021 bool hotplug = false;
4022
4023 if (hdr->broadcast) {
4024 const u8 *guid = NULL;
4025
4026 if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY)
4027 guid = msg->u.conn_stat.guid;
4028 else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
4029 guid = msg->u.resource_stat.guid;
4030
4031 if (guid)
4032 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
4033 } else {
4034 mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
4035 }
4036
4037 if (!mstb) {
4038 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
4039 hdr->lct);
4040 return false;
4041 }
4042
	/* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
4044 if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
4045 drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
4046 hotplug = true;
4047 }
4048
4049 drm_dp_mst_topology_put_mstb(mstb);
4050 return hotplug;
4051}
4052
4053static void drm_dp_mst_up_req_work(struct work_struct *work)
4054{
4055 struct drm_dp_mst_topology_mgr *mgr =
4056 container_of(work, struct drm_dp_mst_topology_mgr,
4057 up_req_work);
4058 struct drm_dp_pending_up_req *up_req;
4059 bool send_hotplug = false;
4060
4061 mutex_lock(&mgr->probe_lock);
4062 while (true) {
4063 mutex_lock(&mgr->up_req_lock);
4064 up_req = list_first_entry_or_null(&mgr->up_req_list,
4065 struct drm_dp_pending_up_req,
4066 next);
4067 if (up_req)
4068 list_del(&up_req->next);
4069 mutex_unlock(&mgr->up_req_lock);
4070
4071 if (!up_req)
4072 break;
4073
4074 send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req);
4075 kfree(up_req);
4076 }
4077 mutex_unlock(&mgr->probe_lock);
4078
4079 if (send_hotplug)
4080 drm_kms_helper_hotplug_event(mgr->dev);
4081}
4082
4083static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
4084{
4085 struct drm_dp_pending_up_req *up_req;
4086
4087 if (!drm_dp_get_one_sb_msg(mgr, true, NULL))
4088 goto out;
4089
4090 if (!mgr->up_req_recv.have_eomt)
4091 return 0;
4092
4093 up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
4094 if (!up_req) {
4095 DRM_ERROR("Not enough memory to process MST up req\n");
4096 return -ENOMEM;
4097 }
4098 INIT_LIST_HEAD(&up_req->next);
4099
4100 drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg);
4101
4102 if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
4103 up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) {
4104 DRM_DEBUG_KMS("Received unknown up req type, ignoring: %x\n",
4105 up_req->msg.req_type);
4106 kfree(up_req);
4107 goto out;
4108 }
4109
4110 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
4111 false);
4112
4113 if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
4114 const struct drm_dp_connection_status_notify *conn_stat =
4115 &up_req->msg.u.conn_stat;
4116
4117 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
4118 conn_stat->port_number,
4119 conn_stat->legacy_device_plug_status,
4120 conn_stat->displayport_device_plug_status,
4121 conn_stat->message_capability_status,
4122 conn_stat->input_port,
4123 conn_stat->peer_device_type);
4124 } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
4125 const struct drm_dp_resource_status_notify *res_stat =
4126 &up_req->msg.u.resource_stat;
4127
4128 DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n",
4129 res_stat->port_number,
4130 res_stat->available_pbn);
4131 }
4132
4133 up_req->hdr = mgr->up_req_recv.initial_hdr;
4134 mutex_lock(&mgr->up_req_lock);
4135 list_add_tail(&up_req->next, &mgr->up_req_list);
4136 mutex_unlock(&mgr->up_req_lock);
4137 queue_work(system_long_wq, &mgr->up_req_work);
4138
4139out:
4140 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
4141 return 0;
4142}
4143
/**
 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
 * @mgr: manager to notify irq for.
 * @esi: 4 bytes from SINK_COUNT_ESI
 * @handled: whether the hpd interrupt was consumed or not
 *
 * This should be called from the driver when it detects a short IRQ,
 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
 * topology manager will process the sideband messages received as a
 * result of this.
 */
4155int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
4156{
4157 int ret = 0;
4158 int sc;
4159 *handled = false;
4160 sc = esi[0] & 0x3f;
4161
4162 if (sc != mgr->sink_count) {
4163 mgr->sink_count = sc;
4164 *handled = true;
4165 }
4166
4167 if (esi[1] & DP_DOWN_REP_MSG_RDY) {
4168 ret = drm_dp_mst_handle_down_rep(mgr);
4169 *handled = true;
4170 }
4171
4172 if (esi[1] & DP_UP_REQ_MSG_RDY) {
4173 ret |= drm_dp_mst_handle_up_req(mgr);
4174 *handled = true;
4175 }
4176
4177 drm_dp_mst_kick_tx(mgr);
4178 return ret;
4179}
4180EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
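
/*
 * Illustrative short-HPD handler fragment. A real driver loops until no new
 * events are seen, and acks the serviced events by writing the handled ESI
 * bits back to the sink (a sketch, error handling omitted):
 *
 *	u8 esi[DP_DPRX_ESI_LEN] = {};
 *	bool handled;
 *
 *	drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI, esi, DP_DPRX_ESI_LEN);
 *	drm_dp_mst_hpd_irq(mgr, esi, &handled);
 *	if (handled)
 *		drm_dp_dpcd_write(mgr->aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
 */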
4181
/**
 * drm_dp_mst_detect_port() - get connection status for an MST port
 * @connector: DRM connector for this port
 * @ctx: The acquisition context to use for grabbing locks
 * @mgr: manager for this port
 * @port: pointer to a port
 *
 * This returns the current connection state for a port.
 */
4191int
4192drm_dp_mst_detect_port(struct drm_connector *connector,
4193 struct drm_modeset_acquire_ctx *ctx,
4194 struct drm_dp_mst_topology_mgr *mgr,
4195 struct drm_dp_mst_port *port)
4196{
4197 int ret;
4198
	/* we need to search for the port in the mgr in case it's gone */
4200 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4201 if (!port)
4202 return connector_status_disconnected;
4203
4204 ret = drm_modeset_lock(&mgr->base.lock, ctx);
4205 if (ret)
4206 goto out;
4207
4208 ret = connector_status_disconnected;
4209
4210 if (!port->ddps)
4211 goto out;
4212
4213 switch (port->pdt) {
4214 case DP_PEER_DEVICE_NONE:
4215 case DP_PEER_DEVICE_MST_BRANCHING:
4216 if (!port->mcs)
4217 ret = connector_status_connected;
4218 break;
4219
4220 case DP_PEER_DEVICE_SST_SINK:
4221 ret = connector_status_connected;

		/* for logical ports - cache the EDID */
		if (port->port_num >= DP_MST_LOGICAL_PORT_0 &&
		    !port->cached_edid)
			port->cached_edid = drm_get_edid(connector,
							 &port->aux.ddc);
4226 break;
4227 case DP_PEER_DEVICE_DP_LEGACY_CONV:
4228 if (port->ldps)
4229 ret = connector_status_connected;
4230 break;
4231 }
4232out:
4233 drm_dp_mst_topology_put_port(port);
4234 return ret;
4235}
4236EXPORT_SYMBOL(drm_dp_mst_detect_port);
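
/*
 * Typically called from a connector's &drm_connector_helper_funcs.detect_ctx
 * hook, e.g. (illustrative, with hypothetical driver types):
 *
 *	static int my_mst_detect(struct drm_connector *connector,
 *				 struct drm_modeset_acquire_ctx *ctx,
 *				 bool force)
 *	{
 *		struct my_mst_connector *c = to_my_mst_connector(connector);
 *
 *		return drm_dp_mst_detect_port(connector, ctx, c->mgr, c->port);
 *	}
 */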
4237
/**
 * drm_dp_mst_get_edid() - get EDID for an MST port
 * @connector: toplevel connector to get EDID for
 * @mgr: manager for this port
 * @port: unverified pointer to a port
 *
 * This returns an EDID for the port connected to a connector.
 * It validates the pointer still exists so the caller doesn't require a
 * reference.
 */
4248struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4249{
4250 struct edid *edid = NULL;
4251
	/* we need to search for the port in the mgr in case it's gone */
4253 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4254 if (!port)
4255 return NULL;
4256
4257 if (port->cached_edid)
4258 edid = drm_edid_duplicate(port->cached_edid);
	else
		edid = drm_get_edid(connector, &port->aux.ddc);
4262 port->has_audio = drm_detect_monitor_audio(edid);
4263 drm_dp_mst_topology_put_port(port);
4264 return edid;
4265}
4266EXPORT_SYMBOL(drm_dp_mst_get_edid);
4267
/**
 * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
 * @mgr: manager to use
 * @pbn: payload bandwidth to convert into slots.
 *
 * Calculate the number of VCPI slots that will be required for the given PBN
 * value. This function is deprecated, and should not be used in atomic
 * drivers.
 *
 * RETURNS:
 * The total slots required for this port, or error.
 */
4280int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
4281 int pbn)
4282{
4283 int num_slots;
4284
4285 num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
4286
	/* max. time slots - one slot for MTP header */
4288 if (num_slots > 63)
4289 return -ENOSPC;
4290 return num_slots;
4291}
4292EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
4293
4294static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4295 struct drm_dp_vcpi *vcpi, int pbn, int slots)
4296{
4297 int ret;
4298
	/* max. time slots - one slot for MTP header */
4300 if (slots > 63)
4301 return -ENOSPC;
4302
4303 vcpi->pbn = pbn;
4304 vcpi->aligned_pbn = slots * mgr->pbn_div;
4305 vcpi->num_slots = slots;
4306
4307 ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
4308 if (ret < 0)
4309 return ret;
4310 return 0;
4311}
4312
/**
 * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
 * @state: global atomic state
 * @mgr: MST topology manager for the port
 * @port: port to find vcpi slots for
 * @pbn: bandwidth required for the mode in PBN
 * @pbn_div: divider for DSC mode that takes FEC overhead into account; if
 * this is less than or equal to 0, &drm_dp_mst_topology_mgr.pbn_div is used
 *
 * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
 * may have had. Any atomic drivers which support MST must call this function
 * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
 * current VCPI allocation for the new state, but only when
 * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set
 * to ensure compatibility with userspace applications that still use the
 * legacy modesetting UAPI.
 *
 * Allocations set by this function are not checked against the bandwidth
 * restraints of @mgr until the driver calls drm_dp_mst_atomic_check().
 *
 * Additionally, it is OK to call this function multiple times on the same
 * @port as needed. It is not OK however, to call this function and
 * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase.
 *
 * See also:
 * drm_dp_atomic_release_vcpi_slots()
 * drm_dp_mst_atomic_check()
 *
 * Returns:
 * Total slots in the atomic state assigned for this port, or a negative error
 * code if the port no longer exists.
 */
4344int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
4345 struct drm_dp_mst_topology_mgr *mgr,
4346 struct drm_dp_mst_port *port, int pbn,
4347 int pbn_div)
4348{
4349 struct drm_dp_mst_topology_state *topology_state;
4350 struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
4351 int prev_slots, prev_bw, req_slots;
4352
4353 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
4354 if (IS_ERR(topology_state))
4355 return PTR_ERR(topology_state);
4356
4357
4358 list_for_each_entry(pos, &topology_state->vcpis, next) {
4359 if (pos->port == port) {
4360 vcpi = pos;
4361 prev_slots = vcpi->vcpi;
4362 prev_bw = vcpi->pbn;
4363
			/*
			 * This should never happen, unless the driver tries
			 * releasing and allocating the same VCPI allocation,
			 * which is an error
			 */
4369 if (WARN_ON(!prev_slots)) {
4370 DRM_ERROR("cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
4371 port);
4372 return -EINVAL;
4373 }
4374
4375 break;
4376 }
4377 }
4378 if (!vcpi) {
4379 prev_slots = 0;
4380 prev_bw = 0;
4381 }
4382
4383 if (pbn_div <= 0)
4384 pbn_div = mgr->pbn_div;
4385
4386 req_slots = DIV_ROUND_UP(pbn, pbn_div);
4387
4388 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
4389 port->connector->base.id, port->connector->name,
4390 port, prev_slots, req_slots);
4391 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n",
4392 port->connector->base.id, port->connector->name,
4393 port, prev_bw, pbn);
4394
4395
4396 if (!vcpi) {
4397 vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
4398 if (!vcpi)
4399 return -ENOMEM;
4400
4401 drm_dp_mst_get_port_malloc(port);
4402 vcpi->port = port;
4403 list_add(&vcpi->next, &topology_state->vcpis);
4404 }
4405 vcpi->vcpi = req_slots;
4406 vcpi->pbn = pbn;
4407
4408 return req_slots;
4409}
4410EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
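
/*
 * Illustrative atomic_check fragment (a sketch; bpp, adjusted_mode, mgr and
 * port come from the driver's own state):
 *
 *	if (crtc_state->mode_changed || crtc_state->connectors_changed) {
 *		int pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock,
 *					       bpp, false);
 *		int slots = drm_dp_atomic_find_vcpi_slots(state, mgr, port,
 *							  pbn, 0);
 *
 *		if (slots < 0)
 *			return slots;
 *	}
 */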
4411
/**
 * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
 * @state: global atomic state
 * @mgr: MST topology manager for the port
 * @port: The port to release the VCPI slots from
 *
 * Releases any VCPI slots that have been allocated to a port in the atomic
 * state. Any atomic drivers which support MST must call this function in
 * their &drm_connector_helper_funcs.atomic_check() callback when the
 * connector will no longer have VCPI allocated (e.g. because its CRTC was
 * removed) when it had VCPI allocated in the previous atomic state.
 *
 * It is OK to call this even if @port has been removed from the system.
 * Additionally, it is OK to call this function multiple times on the same
 * @port as needed. It is not OK however, to call this function and
 * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check
 * phase.
 *
 * See also:
 * drm_dp_atomic_find_vcpi_slots()
 * drm_dp_mst_atomic_check()
 *
 * Returns:
 * 0 on success, or a negative error code if no VCPI allocation was found for
 * @port in the topology state.
 */
4438int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
4439 struct drm_dp_mst_topology_mgr *mgr,
4440 struct drm_dp_mst_port *port)
4441{
4442 struct drm_dp_mst_topology_state *topology_state;
4443 struct drm_dp_vcpi_allocation *pos;
4444 bool found = false;
4445
4446 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
4447 if (IS_ERR(topology_state))
4448 return PTR_ERR(topology_state);
4449
4450 list_for_each_entry(pos, &topology_state->vcpis, next) {
4451 if (pos->port == port) {
4452 found = true;
4453 break;
4454 }
4455 }
4456 if (WARN_ON(!found)) {
4457 DRM_ERROR("no VCPI for [MST PORT:%p] found in mst state %p\n",
4458 port, &topology_state->base);
4459 return -EINVAL;
4460 }
4461
4462 DRM_DEBUG_ATOMIC("[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
4463 if (pos->vcpi) {
4464 drm_dp_mst_put_port_malloc(port);
4465 pos->vcpi = 0;
4466 pos->pbn = 0;
4467 }
4468
4469 return 0;
4470}
4471EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);
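
/*
 * Sketch of the matching release in a connector's atomic_check when the
 * connector is losing its CRTC (illustrative only):
 *
 *	if (old_conn_state->crtc && !new_conn_state->crtc)
 *		return drm_dp_atomic_release_vcpi_slots(state, mgr, port);
 */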
4472
/**
 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
 * @mgr: manager for this port
 * @port: port to allocate a virtual channel for.
 * @pbn: payload bandwidth number to request
 * @slots: returned number of slots for this PBN.
 */
4480bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4481 struct drm_dp_mst_port *port, int pbn, int slots)
4482{
4483 int ret;
4484
4485 if (slots < 0)
4486 return false;
4487
4488 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4489 if (!port)
4490 return false;
4491
4492 if (port->vcpi.vcpi > 0) {
4493 DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
4494 port->vcpi.vcpi, port->vcpi.pbn, pbn);
4495 if (pbn == port->vcpi.pbn) {
4496 drm_dp_mst_topology_put_port(port);
4497 return true;
4498 }
4499 }
4500
4501 ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
4502 if (ret) {
4503 DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
4504 DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
4505 drm_dp_mst_topology_put_port(port);
4506 goto out;
4507 }
4508 DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
4509 pbn, port->vcpi.num_slots);
4510
	/* Keep port allocated until its payload has been removed */
4512 drm_dp_mst_get_port_malloc(port);
4513 drm_dp_mst_topology_put_port(port);
4514 return true;
4515out:
4516 return false;
4517}
4518EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
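
/*
 * Sketch of the legacy (non-atomic) allocation path, pairing this function
 * with the helpers above (illustrative, error handling abbreviated):
 *
 *	int pbn = drm_dp_calc_pbn_mode(mode->clock, bpp, false);
 *	int slots = drm_dp_find_vcpi_slots(mgr, pbn);
 *
 *	if (slots < 0 || !drm_dp_mst_allocate_vcpi(mgr, port, pbn, slots))
 *		return -ENOSPC;
 */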
4519
4520int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4521{
4522 int slots = 0;
4523
4524 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4525 if (!port)
4526 return slots;
4527
4528 slots = port->vcpi.num_slots;
4529 drm_dp_mst_topology_put_port(port);
4530 return slots;
4531}
4532EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
4533
/**
 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This just resets the number of slots for the port's VCPI for later
 * manipulation.
 */
4541void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4542{
	/*
	 * A port with VCPI will remain allocated until its VCPI is
	 * released, no verified ref needed
	 */
4547
4548 port->vcpi.num_slots = 0;
4549}
4550EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
4551
/**
 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
 * @mgr: manager for this port
 * @port: port to deallocate vcpi for
 *
 * This can be called unconditionally, regardless of whether
 * drm_dp_mst_allocate_vcpi() succeeded or not.
 */
4560void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4561 struct drm_dp_mst_port *port)
4562{
4563 if (!port->vcpi.vcpi)
4564 return;
4565
4566 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
4567 port->vcpi.num_slots = 0;
4568 port->vcpi.pbn = 0;
4569 port->vcpi.aligned_pbn = 0;
4570 port->vcpi.vcpi = 0;
4571 drm_dp_mst_put_port_malloc(port);
4572}
4573EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id, struct drm_dp_payload *payload)
{
	u8 payload_alloc[3], status;
	int ret;
	int retries = 0;

	drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
			   DP_PAYLOAD_TABLE_UPDATED);

	payload_alloc[0] = id;
	payload_alloc[1] = payload->start_slot;
	payload_alloc[2] = payload->num_slots;

	ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
	if (ret != 3) {
		DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
		goto fail;
	}

retry:
	ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
	if (ret < 0) {
		DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
		goto fail;
	}

	if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
		retries++;
		if (retries < 20) {
			usleep_range(10000, 20000);
			goto retry;
		}
		DRM_DEBUG_KMS("status not set after read payload table status %d\n",
			      status);
		ret = -EINVAL;
		goto fail;
	}
	ret = 0;
fail:
	return ret;
}

static int do_get_act_status(struct drm_dp_aux *aux)
{
	int ret;
	u8 status;

	ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
	if (ret < 0)
		return ret;

	return status;
}

/**
 * drm_dp_check_act_status() - Polls for ACT handled status.
 * @mgr: manager to use
 *
 * Tries waiting for the MST hub to finish updating its payload table by
 * polling for the ACT handled bit for up to 3 seconds (yes, some hubs really
 * take that long).
 *
 * Returns:
 * 0 if the ACT was handled in time, negative error code on failure.
 */
int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
{
	/*
	 * There doesn't seem to be any recommended retry count or timeout in
	 * the MST specification, and some hubs have been observed to take
	 * well over a second to update their payload allocations, so use a
	 * rather generous timeout.
	 */
	const int timeout_ms = 3000;
	int ret, status;

	ret = readx_poll_timeout(do_get_act_status, mgr->aux, status,
				 status & DP_PAYLOAD_ACT_HANDLED || status < 0,
				 200, timeout_ms * USEC_PER_MSEC);
	if (ret < 0 && status >= 0) {
		DRM_ERROR("Failed to get ACT after %dms, last status: %02x\n",
			  timeout_ms, status);
		return -EINVAL;
	} else if (status < 0) {
		/*
		 * Failure here isn't unexpected - the hub may have
		 * just been unplugged
		 */
		DRM_DEBUG_KMS("Failed to read payload table status: %d\n",
			      status);
		return status;
	}

	return 0;
}
EXPORT_SYMBOL(drm_dp_check_act_status);
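
/*
 * Example (illustrative sketch): drivers normally poll for the ACT after
 * triggering it in hardware and before sending the second half of the
 * payload updates. example_hw_trigger_act() is a hypothetical hardware
 * hook, not a DRM helper.
 *
 *	drm_dp_update_payload_part1(&example->mst_mgr);
 *	example_hw_trigger_act(example);
 *	drm_dp_check_act_status(&example->mst_mgr);
 *	drm_dp_update_payload_part2(&example->mst_mgr);
 */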

/**
 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
 * @clock: dot clock for the mode
 * @bpp: bpp for the mode.
 * @dsc: DSC mode. If true, bpp has units of 1/16 of a bit per pixel
 *
 * This uses the formula in the spec to calculate the PBN value for a mode.
 */
int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc)
{
	/*
	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
	 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
	 * common multiplier to render an integer PBN for all link rate/lane
	 * counts combinations
	 * calculate
	 * peak_kbps *= (1006/1000)
	 * peak_kbps *= (64/54)
	 * peak_kbps *= 8    convert to bytes
	 *
	 * If the bpp is in units of 1/16, further divide by 16. Put this
	 * factor in the numerator rather than the denominator to avoid
	 * integer overflow
	 */
	if (dsc)
		return DIV_ROUND_UP_ULL(mul_u32_u32(clock * (bpp / 16), 64 * 1006),
					8 * 54 * 1000 * 1000);

	return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
				8 * 54 * 1000 * 1000);
}
EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
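
/*
 * Worked example: a 1920x1080@60 mode with a 148500kHz dot clock at 24bpp
 * and no DSC yields
 *	DIV_ROUND_UP(148500 * 24 * 64 * 1006, 8 * 54 * 1000 * 1000) = 532 PBN,
 * i.e. the peak bandwidth scaled by the 1.006 margin and converted into
 * 54/64MB/s PBN units.
 */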

/* we want to kick the TX after we've ack the up/down IRQs. */
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
{
	queue_work(system_long_wq, &mgr->tx_work);
}

static void drm_dp_mst_dump_mstb(struct seq_file *m,
				 struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_port *port;
	int tabs = mstb->lct;
	char prefix[10];
	int i;

	for (i = 0; i < tabs; i++)
		prefix[i] = '\t';
	prefix[i] = '\0';

	seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
	list_for_each_entry(port, &mstb->ports, next) {
		seq_printf(m,
			   "%sport: %d: input: %d: pdt: %d, ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n",
			   prefix, port->port_num, port->input, port->pdt,
			   port->ddps, port->ldps, port->num_sdp_streams,
			   port->num_sdp_stream_sinks, port, port->connector);
		if (port->mstb)
			drm_dp_mst_dump_mstb(m, port->mstb);
	}
}
#define DP_PAYLOAD_TABLE_SIZE		64

static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf)
{
	int i;

	for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
		if (drm_dp_dpcd_read(mgr->aux,
				     DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
				     &buf[i], 16) != 16)
			return false;
	}
	return true;
}

static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
			       struct drm_dp_mst_port *port, char *name,
			       int namelen)
{
	struct edid *mst_edid;

	mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
	drm_edid_get_monitor_name(mst_edid, name, namelen);
	/* drm_dp_mst_get_edid() returns a duplicated EDID, free it */
	kfree(mst_edid);
}

/**
 * drm_dp_mst_dump_topology(): dump topology to seq file.
 * @m: seq_file to dump output to
 * @mgr: manager to dump current topology for.
 *
 * helper to dump MST topology to a seq file for debugfs.
 */
void drm_dp_mst_dump_topology(struct seq_file *m,
			      struct drm_dp_mst_topology_mgr *mgr)
{
	int i;
	struct drm_dp_mst_port *port;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		drm_dp_mst_dump_mstb(m, mgr->mst_primary);

	/* dump VCPIs */
	mutex_unlock(&mgr->lock);

	mutex_lock(&mgr->payload_lock);
	seq_printf(m, "vcpi: %lx %lx %d\n", mgr->payload_mask, mgr->vcpi_mask,
		   mgr->max_payloads);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i]) {
			char name[14];

			port = container_of(mgr->proposed_vcpis[i],
					    struct drm_dp_mst_port, vcpi);
			fetch_monitor_name(mgr, port, name, sizeof(name));
			seq_printf(m, "vcpi %d: %d %d %d sink name: %s\n", i,
				   port->port_num, port->vcpi.vcpi,
				   port->vcpi.num_slots,
				   (*name != 0) ? name : "Unknown");
		} else {
			seq_printf(m, "vcpi %d:unused\n", i);
		}
	}
	for (i = 0; i < mgr->max_payloads; i++) {
		seq_printf(m, "payload %d: %d, %d, %d\n",
			   i,
			   mgr->payloads[i].payload_state,
			   mgr->payloads[i].start_slot,
			   mgr->payloads[i].num_slots);
	}
	mutex_unlock(&mgr->payload_lock);

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		u8 buf[DP_PAYLOAD_TABLE_SIZE];
		int ret;

		/*
		 * drm_dp_dpcd_read() returns the number of bytes read on
		 * success, so compare against the requested length rather
		 * than treating any non-zero value as a failure.
		 */
		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
		if (ret != DP_RECEIVER_CAP_SIZE) {
			seq_printf(m, "dpcd read failed\n");
			goto out;
		}
		seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);

		ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
		if (ret != 2) {
			seq_printf(m, "faux/mst read failed\n");
			goto out;
		}
		seq_printf(m, "faux/mst: %*ph\n", 2, buf);

		ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
		if (ret != 1) {
			seq_printf(m, "mst ctrl read failed\n");
			goto out;
		}
		seq_printf(m, "mst ctrl: %*ph\n", 1, buf);

		/* dump the standard OUI branch header */
		ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
		if (ret != DP_BRANCH_OUI_HEADER_SIZE) {
			seq_printf(m, "branch oui read failed\n");
			goto out;
		}
		seq_printf(m, "branch oui: %*phN devid: ", 3, buf);

		for (i = 0x3; i < 0x8 && buf[i]; i++)
			seq_printf(m, "%c", buf[i]);
		seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
			   buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
		if (dump_dp_payload_table(mgr, buf))
			seq_printf(m, "payload table: %*ph\n",
				   DP_PAYLOAD_TABLE_SIZE, buf);
	}

out:
	mutex_unlock(&mgr->lock);
}
EXPORT_SYMBOL(drm_dp_mst_dump_topology);

static void drm_dp_tx_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr =
		container_of(work, struct drm_dp_mst_topology_mgr, tx_work);

	mutex_lock(&mgr->qlock);
	if (!list_empty(&mgr->tx_msg_downq))
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);
}

static inline void
drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
{
	drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);

	if (port->connector) {
		drm_connector_unregister(port->connector);
		drm_connector_put(port->connector);
	}

	drm_dp_mst_put_port_malloc(port);
}

static inline void
drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	struct drm_dp_mst_port *port, *port_tmp;
	struct drm_dp_sideband_msg_tx *txmsg, *txmsg_tmp;
	bool wake_tx = false;

	mutex_lock(&mgr->lock);
	list_for_each_entry_safe(port, port_tmp, &mstb->ports, next) {
		list_del(&port->next);
		drm_dp_mst_topology_put_port(port);
	}
	mutex_unlock(&mgr->lock);

	/* drop any tx slots msg */
	mutex_lock(&mstb->mgr->qlock);
	list_for_each_entry_safe(txmsg, txmsg_tmp, &mgr->tx_msg_downq, next) {
		if (txmsg->dst != mstb)
			continue;

		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		list_del(&txmsg->next);
		wake_tx = true;
	}
	mutex_unlock(&mstb->mgr->qlock);

	if (wake_tx)
		wake_up_all(&mstb->mgr->tx_waitq);

	drm_dp_mst_put_mstb_malloc(mstb);
}

static void drm_dp_delayed_destroy_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr =
		container_of(work, struct drm_dp_mst_topology_mgr,
			     delayed_destroy_work);
	bool send_hotplug = false, go_again;

	/*
	 * Not a regular list traverse as we have to drop the destroy
	 * connector lock before destroying the mstb/port, to avoid AB->BA
	 * ordering between this lock and the config mutex.
	 */
	do {
		go_again = false;

		for (;;) {
			struct drm_dp_mst_branch *mstb;

			mutex_lock(&mgr->delayed_destroy_lock);
			mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list,
							struct drm_dp_mst_branch,
							destroy_next);
			if (mstb)
				list_del(&mstb->destroy_next);
			mutex_unlock(&mgr->delayed_destroy_lock);

			if (!mstb)
				break;

			drm_dp_delayed_destroy_mstb(mstb);
			go_again = true;
		}

		for (;;) {
			struct drm_dp_mst_port *port;

			mutex_lock(&mgr->delayed_destroy_lock);
			port = list_first_entry_or_null(&mgr->destroy_port_list,
							struct drm_dp_mst_port,
							next);
			if (port)
				list_del(&port->next);
			mutex_unlock(&mgr->delayed_destroy_lock);

			if (!port)
				break;

			drm_dp_delayed_destroy_port(port);
			send_hotplug = true;
			go_again = true;
		}
	} while (go_again);

	if (send_hotplug)
		drm_kms_helper_hotplug_event(mgr->dev);
}

static struct drm_private_state *
drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
{
	struct drm_dp_mst_topology_state *state, *old_state =
		to_dp_mst_topology_state(obj->state);
	struct drm_dp_vcpi_allocation *pos, *vcpi;

	state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	INIT_LIST_HEAD(&state->vcpis);

	list_for_each_entry(pos, &old_state->vcpis, next) {
		/* Prune leftover freed VCPI allocations */
		if (!pos->vcpi)
			continue;

		vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL);
		if (!vcpi)
			goto fail;

		drm_dp_mst_get_port_malloc(vcpi->port);
		list_add(&vcpi->next, &state->vcpis);
	}

	return &state->base;

fail:
	list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) {
		drm_dp_mst_put_port_malloc(pos->port);
		kfree(pos);
	}
	kfree(state);

	return NULL;
}

static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
				     struct drm_private_state *state)
{
	struct drm_dp_mst_topology_state *mst_state =
		to_dp_mst_topology_state(state);
	struct drm_dp_vcpi_allocation *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) {
		/* We only keep references to ports with non-zero VCPIs */
		if (pos->vcpi)
			drm_dp_mst_put_port_malloc(pos->port);
		kfree(pos);
	}

	kfree(mst_state);
}

static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
						 struct drm_dp_mst_branch *branch)
{
	while (port->parent) {
		if (port->parent == branch)
			return true;

		if (port->parent->port_parent)
			port = port->parent->port_parent;
		else
			break;
	}
	return false;
}

static int
drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
				      struct drm_dp_mst_topology_state *state);

static int
drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
				      struct drm_dp_mst_topology_state *state)
{
	struct drm_dp_vcpi_allocation *vcpi;
	struct drm_dp_mst_port *port;
	int pbn_used = 0, ret;
	bool found = false;

	/* Check that we have at least one port in our state that's downstream
	 * of this branch, otherwise we can skip this branch
	 */
	list_for_each_entry(vcpi, &state->vcpis, next) {
		if (!vcpi->pbn ||
		    !drm_dp_mst_port_downstream_of_branch(vcpi->port, mstb))
			continue;

		found = true;
		break;
	}
	if (!found)
		return 0;

	if (mstb->port_parent)
		DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] Checking bandwidth limits on [MSTB:%p]\n",
				 mstb->port_parent->parent, mstb->port_parent,
				 mstb);
	else
		DRM_DEBUG_ATOMIC("[MSTB:%p] Checking bandwidth limits\n",
				 mstb);

	list_for_each_entry(port, &mstb->ports, next) {
		ret = drm_dp_mst_atomic_check_port_bw_limit(port, state);
		if (ret < 0)
			return ret;

		pbn_used += ret;
	}

	return pbn_used;
}

static int
drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
				      struct drm_dp_mst_topology_state *state)
{
	struct drm_dp_vcpi_allocation *vcpi;
	int pbn_used = 0;

	if (port->pdt == DP_PEER_DEVICE_NONE)
		return 0;

	if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
		bool found = false;

		list_for_each_entry(vcpi, &state->vcpis, next) {
			if (vcpi->port != port)
				continue;
			if (!vcpi->pbn)
				return 0;

			found = true;
			break;
		}
		if (!found)
			return 0;

		/* This should never happen, as it means we tried to
		 * set a mode before querying the full_pbn
		 */
		if (WARN_ON(!port->full_pbn))
			return -EINVAL;

		pbn_used = vcpi->pbn;
	} else {
		pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb,
								 state);
		if (pbn_used <= 0)
			return pbn_used;
	}

	if (pbn_used > port->full_pbn) {
		DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n",
				 port->parent, port, pbn_used,
				 port->full_pbn);
		return -ENOSPC;
	}

	DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n",
			 port->parent, port, pbn_used, port->full_pbn);

	return pbn_used;
}

static inline int
drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr,
					 struct drm_dp_mst_topology_state *mst_state)
{
	struct drm_dp_vcpi_allocation *vcpi;
	int avail_slots = 63, payload_count = 0;

	list_for_each_entry(vcpi, &mst_state->vcpis, next) {
		/* Releasing VCPI is always OK-even if the port is gone */
		if (!vcpi->vcpi) {
			DRM_DEBUG_ATOMIC("[MST PORT:%p] releases all VCPI slots\n",
					 vcpi->port);
			continue;
		}

		DRM_DEBUG_ATOMIC("[MST PORT:%p] requires %d vcpi slots\n",
				 vcpi->port, vcpi->vcpi);

		avail_slots -= vcpi->vcpi;
		if (avail_slots < 0) {
			DRM_DEBUG_ATOMIC("[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
					 vcpi->port, mst_state,
					 avail_slots + vcpi->vcpi);
			return -ENOSPC;
		}

		if (++payload_count > mgr->max_payloads) {
			DRM_DEBUG_ATOMIC("[MST MGR:%p] state %p has too many payloads (max=%d)\n",
					 mgr, mst_state, mgr->max_payloads);
			return -EINVAL;
		}
	}
	DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
			 mgr, mst_state, avail_slots,
			 63 - avail_slots);

	return 0;
}

/**
 * drm_dp_mst_add_affected_dsc_crtcs() - Add CRTCs affected by a DSC change
 * @state: Pointer to the new struct drm_dp_mst_topology_state
 * @mgr: MST topology manager
 *
 * Whenever there is a change in the MST topology, the DSC configuration
 * has to be recalculated, so we need to trigger a modeset on all affected
 * CRTCs in that topology.
 *
 * See also:
 * drm_dp_mst_atomic_enable_dsc()
 */
int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_mst_topology_state *mst_state;
	struct drm_dp_vcpi_allocation *pos;
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;

	mst_state = drm_atomic_get_mst_topology_state(state, mgr);
	if (IS_ERR(mst_state))
		return -EINVAL;

	list_for_each_entry(pos, &mst_state->vcpis, next) {

		connector = pos->port->connector;
		if (!connector)
			return -EINVAL;

		conn_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(conn_state))
			return PTR_ERR(conn_state);

		crtc = conn_state->crtc;
		if (!crtc)
			continue;

		if (!drm_dp_mst_dsc_aux_for_port(pos->port))
			continue;

		crtc_state = drm_atomic_get_crtc_state(mst_state->base.state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		DRM_DEBUG_ATOMIC("[MST MGR:%p] Setting mode_changed flag on CRTC %p\n",
				 mgr, crtc);

		crtc_state->mode_changed = true;
	}
	return 0;
}
EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs);
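
/*
 * Example (illustrative sketch): a driver supporting DSC over MST would
 * typically call this from its &drm_mode_config_funcs.atomic_check
 * implementation, once per topology manager, before computing new DSC
 * parameters. "example" is a hypothetical driver structure.
 *
 *	ret = drm_dp_mst_add_affected_dsc_crtcs(state, &example->mst_mgr);
 *	if (ret)
 *		return ret;
 */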

/**
 * drm_dp_mst_atomic_enable_dsc() - Set the DSC enable flag on a port
 * @state: Pointer to the new drm_atomic_state
 * @port: Pointer to the affected MST port
 * @pbn: Newly recalculated bandwidth required for the link with DSC enabled
 * @pbn_div: Divider to calculate the correct number of PBN per slot
 * @enable: Boolean flag to enable or disable DSC on the port
 *
 * This function enables DSC on the given port by recalculating its VCPI
 * from the provided PBN, and sets the dsc_enabled flag to keep track of
 * which ports have DSC enabled.
 */
int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
				 struct drm_dp_mst_port *port,
				 int pbn, int pbn_div,
				 bool enable)
{
	struct drm_dp_mst_topology_state *mst_state;
	struct drm_dp_vcpi_allocation *pos;
	bool found = false;
	int vcpi = 0;

	mst_state = drm_atomic_get_mst_topology_state(state, port->mgr);
	if (IS_ERR(mst_state))
		return PTR_ERR(mst_state);

	list_for_each_entry(pos, &mst_state->vcpis, next) {
		if (pos->port == port) {
			found = true;
			break;
		}
	}

	if (!found) {
		DRM_DEBUG_ATOMIC("[MST PORT:%p] Couldn't find VCPI allocation in mst state %p\n",
				 port, mst_state);
		return -EINVAL;
	}

	if (pos->dsc_enabled == enable) {
		DRM_DEBUG_ATOMIC("[MST PORT:%p] DSC flag is already set to %d, returning %d VCPI slots\n",
				 port, enable, pos->vcpi);
		vcpi = pos->vcpi;
	}

	if (enable) {
		vcpi = drm_dp_atomic_find_vcpi_slots(state, port->mgr, port, pbn, pbn_div);
		DRM_DEBUG_ATOMIC("[MST PORT:%p] Enabling DSC flag, reallocating %d VCPI slots on the port\n",
				 port, vcpi);
		if (vcpi < 0)
			return -EINVAL;
	}

	pos->dsc_enabled = enable;

	return vcpi;
}
EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
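
/*
 * Example (illustrative sketch): once a driver has decided during atomic
 * check that a port needs DSC, it recomputes the PBN for the compressed
 * bpp (in 1/16 bpp units) and flips the flag; the returned value is the
 * new VCPI slot count. "dsc_bpp_x16" is a hypothetical variable name.
 *
 *	pbn = drm_dp_calc_pbn_mode(mode->clock, dsc_bpp_x16, true);
 *	slots = drm_dp_mst_atomic_enable_dsc(state, port, pbn,
 *					     port->mgr->pbn_div, true);
 *	if (slots < 0)
 *		return slots;
 */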

/**
 * drm_dp_mst_atomic_check() - Check that the new state of an MST topology
 * in an atomic update is valid
 * @state: Pointer to the new &struct drm_dp_mst_topology_state
 *
 * Checks the given topology state for an atomic update to ensure that it's
 * valid. This includes checking whether there's enough bandwidth to support
 * the new VCPI allocations in the atomic update.
 *
 * Any atomic drivers supporting DP MST must make sure to call this after
 * checking the rest of their state in their
 * &drm_mode_config_funcs.atomic_check() callback.
 *
 * See also:
 * drm_dp_atomic_find_vcpi_slots()
 * drm_dp_atomic_release_vcpi_slots()
 *
 * Returns:
 * 0 if the new state is valid, negative error code otherwise.
 */
int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
{
	struct drm_dp_mst_topology_mgr *mgr;
	struct drm_dp_mst_topology_state *mst_state;
	int i, ret = 0;

	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
		if (!mgr->mst_state)
			continue;

		ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state);
		if (ret)
			break;

		mutex_lock(&mgr->lock);
		ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary,
							    mst_state);
		mutex_unlock(&mgr->lock);
		if (ret < 0)
			break;
		else
			ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_atomic_check);
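
/*
 * Example (illustrative sketch): atomic drivers supporting MST are expected
 * to chain this helper after the core checks in their
 * &drm_mode_config_funcs.atomic_check hook, e.g.:
 *
 *	static int example_atomic_check(struct drm_device *dev,
 *					struct drm_atomic_state *state)
 *	{
 *		int ret = drm_atomic_helper_check(dev, state);
 *
 *		if (ret)
 *			return ret;
 *		return drm_dp_mst_atomic_check(state);
 *	}
 */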

const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
	.atomic_duplicate_state = drm_dp_mst_duplicate_state,
	.atomic_destroy_state = drm_dp_mst_destroy_state,
};
EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);

/**
 * drm_atomic_get_mst_topology_state() - get MST topology state
 * @state: global atomic state
 * @mgr: MST topology manager, also the private object in this case
 *
 * This function wraps drm_atomic_get_private_obj_state() passing in the MST
 * atomic state vtable so that the private object state returned is that of
 * an MST topology object. The caller is expected to hold the required
 * modeset locks, as for any private object state lookup.
 *
 * RETURNS:
 * The MST topology state or error pointer.
 */
struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
								    struct drm_dp_mst_topology_mgr *mgr)
{
	return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
}
EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);

/**
 * drm_dp_mst_topology_mgr_init() - initialise a topology manager
 * @mgr: manager struct to initialise
 * @dev: device providing this structure - for i2c addition.
 * @aux: DP helper aux channel to talk to this device
 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
 * @max_payloads: maximum number of payloads this GPU can source
 * @conn_base_id: the connector object ID the MST device is connected to.
 *
 * Return 0 for success, or negative error code on failure
 */
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_device *dev, struct drm_dp_aux *aux,
				 int max_dpcd_transaction_bytes,
				 int max_payloads, int conn_base_id)
{
	struct drm_dp_mst_topology_state *mst_state;

	mutex_init(&mgr->lock);
	mutex_init(&mgr->qlock);
	mutex_init(&mgr->payload_lock);
	mutex_init(&mgr->delayed_destroy_lock);
	mutex_init(&mgr->up_req_lock);
	mutex_init(&mgr->probe_lock);
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
	mutex_init(&mgr->topology_ref_history_lock);
#endif
	INIT_LIST_HEAD(&mgr->tx_msg_downq);
	INIT_LIST_HEAD(&mgr->destroy_port_list);
	INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
	INIT_LIST_HEAD(&mgr->up_req_list);

	/*
	 * delayed_destroy_work will be queued on a dedicated WQ, so that any
	 * requeuing will be also flushed when deiniting the topology manager.
	 */
	mgr->delayed_destroy_wq = alloc_ordered_workqueue("drm_dp_mst_wq", 0);
	if (mgr->delayed_destroy_wq == NULL)
		return -ENOMEM;

	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
	INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
	INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);
	INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work);
	init_waitqueue_head(&mgr->tx_waitq);
	mgr->dev = dev;
	mgr->aux = aux;
	mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
	mgr->max_payloads = max_payloads;
	mgr->conn_base_id = conn_base_id;
	if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
	    max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
		return -EINVAL;
	mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
	if (!mgr->payloads)
		return -ENOMEM;
	mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
	if (!mgr->proposed_vcpis)
		return -ENOMEM;
	set_bit(0, &mgr->payload_mask);

	mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
	if (mst_state == NULL)
		return -ENOMEM;

	mst_state->mgr = mgr;
	INIT_LIST_HEAD(&mst_state->vcpis);

	drm_atomic_private_obj_init(dev, &mgr->base,
				    &mst_state->base,
				    &drm_dp_mst_topology_state_funcs);

	return 0;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
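
/*
 * Example (illustrative sketch): a driver typically embeds the manager in
 * its connector/encoder structure and initializes it once the AUX channel
 * is up. The transaction size and payload count below are arbitrary
 * example values, not recommendations; "example" is a hypothetical
 * driver structure.
 *
 *	ret = drm_dp_mst_topology_mgr_init(&example->mst_mgr, dev,
 *					   &example->aux, 16, 4,
 *					   example->connector.base.id);
 *	if (ret)
 *		return ret;
 */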

/**
 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
 * @mgr: manager to destroy
 */
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
{
	drm_dp_mst_topology_mgr_set_mst(mgr, false);
	flush_work(&mgr->work);
	/* The following will also drain any requeued work on the WQ. */
	if (mgr->delayed_destroy_wq) {
		destroy_workqueue(mgr->delayed_destroy_wq);
		mgr->delayed_destroy_wq = NULL;
	}
	mutex_lock(&mgr->payload_lock);
	kfree(mgr->payloads);
	mgr->payloads = NULL;
	kfree(mgr->proposed_vcpis);
	mgr->proposed_vcpis = NULL;
	mutex_unlock(&mgr->payload_lock);
	mgr->dev = NULL;
	mgr->aux = NULL;
	drm_atomic_private_obj_fini(&mgr->base);
	mgr->funcs = NULL;

	mutex_destroy(&mgr->delayed_destroy_lock);
	mutex_destroy(&mgr->payload_lock);
	mutex_destroy(&mgr->qlock);
	mutex_destroy(&mgr->lock);
	mutex_destroy(&mgr->up_req_lock);
	mutex_destroy(&mgr->probe_lock);
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
	mutex_destroy(&mgr->topology_ref_history_lock);
#endif
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);

static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
{
	int i;

	if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
		return false;

	for (i = 0; i < num - 1; i++) {
		if (msgs[i].flags & I2C_M_RD ||
		    msgs[i].len > 0xff)
			return false;
	}

	return msgs[num - 1].flags & I2C_M_RD &&
		msgs[num - 1].len <= 0xff;
}

static bool remote_i2c_write_ok(const struct i2c_msg msgs[], int num)
{
	int i;

	for (i = 0; i < num - 1; i++) {
		if (msgs[i].flags & I2C_M_RD || !(msgs[i].flags & I2C_M_STOP) ||
		    msgs[i].len > 0xff)
			return false;
	}

	return !(msgs[num - 1].flags & I2C_M_RD) && msgs[num - 1].len <= 0xff;
}

static int drm_dp_mst_i2c_read(struct drm_dp_mst_branch *mstb,
			       struct drm_dp_mst_port *port,
			       struct i2c_msg *msgs, int num)
{
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	unsigned int i;
	struct drm_dp_sideband_msg_req_body msg;
	struct drm_dp_sideband_msg_tx *txmsg = NULL;
	int ret;

	memset(&msg, 0, sizeof(msg));
	msg.req_type = DP_REMOTE_I2C_READ;
	msg.u.i2c_read.num_transactions = num - 1;
	msg.u.i2c_read.port_number = port->port_num;
	for (i = 0; i < num - 1; i++) {
		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
		msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
	}
	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto out;
	}

	txmsg->dst = mstb;
	drm_dp_encode_sideband_req(&msg, txmsg);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
			ret = -EREMOTEIO;
			goto out;
		}
		if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
			ret = -EIO;
			goto out;
		}
		memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes,
		       msgs[num - 1].len);
		ret = num;
	}
out:
	kfree(txmsg);
	return ret;
}

static int drm_dp_mst_i2c_write(struct drm_dp_mst_branch *mstb,
				struct drm_dp_mst_port *port,
				struct i2c_msg *msgs, int num)
{
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	unsigned int i;
	struct drm_dp_sideband_msg_req_body msg;
	struct drm_dp_sideband_msg_tx *txmsg = NULL;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto out;
	}
	for (i = 0; i < num; i++) {
		memset(&msg, 0, sizeof(msg));
		msg.req_type = DP_REMOTE_I2C_WRITE;
		msg.u.i2c_write.port_number = port->port_num;
		msg.u.i2c_write.write_i2c_device_id = msgs[i].addr;
		msg.u.i2c_write.num_bytes = msgs[i].len;
		msg.u.i2c_write.bytes = msgs[i].buf;

		memset(txmsg, 0, sizeof(*txmsg));
		txmsg->dst = mstb;

		drm_dp_encode_sideband_req(&msg, txmsg);
		drm_dp_queue_down_tx(mgr, txmsg);

		ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
		if (ret > 0) {
			if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
				ret = -EREMOTEIO;
				goto out;
			}
		} else {
			goto out;
		}
	}
	ret = num;
out:
	kfree(txmsg);
	return ret;
}

/* I2C device */
static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter,
			       struct i2c_msg *msgs, int num)
{
	struct drm_dp_aux *aux = adapter->algo_data;
	struct drm_dp_mst_port *port =
		container_of(aux, struct drm_dp_mst_port, aux);
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	int ret;

	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
	if (!mstb)
		return -EREMOTEIO;

	if (remote_i2c_read_ok(msgs, num)) {
		ret = drm_dp_mst_i2c_read(mstb, port, msgs, num);
	} else if (remote_i2c_write_ok(msgs, num)) {
		ret = drm_dp_mst_i2c_write(mstb, port, msgs, num);
	} else {
		DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
		ret = -EIO;
	}

	drm_dp_mst_topology_put_mstb(mstb);
	return ret;
}

static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
	       I2C_FUNC_10BIT_ADDR;
}

static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
	.functionality = drm_dp_mst_i2c_functionality,
	.master_xfer = drm_dp_mst_i2c_xfer,
};

/**
 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
 * @port: The port to add the I2C bus on
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port)
{
	struct drm_dp_aux *aux = &port->aux;
	struct device *parent_dev = port->mgr->dev->dev;

	aux->ddc.algo = &drm_dp_mst_i2c_algo;
	aux->ddc.algo_data = aux;
	aux->ddc.retries = 3;

	aux->ddc.class = I2C_CLASS_DDC;
	aux->ddc.owner = THIS_MODULE;
	aux->ddc.dev.parent = parent_dev;
	aux->ddc.dev.of_node = parent_dev->of_node;

	strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(parent_dev),
		sizeof(aux->ddc.name));

	return i2c_add_adapter(&aux->ddc);
}

/**
 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
 * @port: The port to remove the I2C bus from
 */
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port)
{
	i2c_del_adapter(&port->aux.ddc);
}

/**
 * drm_dp_mst_is_virtual_dpcd() - Is the given port a virtual DP Peer Device
 * @port: The port to check
 *
 * A single physical MST hub object can be represented in the topology
 * by multiple branches, with virtual ports between those branches.
 *
 * As of DP1.4, an MST hub with internal (virtual) ports must expose
 * certain DPCD registers over those ports. See sections 2.6.1.1.1
 * and 2.6.1.1.2 of the DisplayPort specification v1.4 for details.
 *
 * May acquire mgr->lock
 *
 * Returns:
 * true if the port is a virtual DP peer device, false otherwise
 */
static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *downstream_port;

	if (!port || port->dpcd_rev < DP_DPCD_REV_14)
		return false;

	/* Virtual DP Sink (Internal Display Panel) */
	if (port->port_num >= 8)
		return true;

	/* DP-to-HDMI Protocol Converter */
	if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV &&
	    !port->mcs &&
	    port->ldps)
		return true;

	/* DP-to-DP */
	mutex_lock(&port->mgr->lock);
	if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
	    port->mstb &&
	    port->mstb->num_ports == 2) {
		list_for_each_entry(downstream_port, &port->mstb->ports, next) {
			if (downstream_port->pdt == DP_PEER_DEVICE_SST_SINK &&
			    !downstream_port->input) {
				mutex_unlock(&port->mgr->lock);
				return true;
			}
		}
	}
	mutex_unlock(&port->mgr->lock);

	return false;
}

/**
 * drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
 * @port: The port to check. A leaf of the MST tree with an attached display.
 *
 * Depending on the situation, DSC may be enabled via the endpoint aux,
 * the immediately upstream aux, or the connector's physical aux.
 *
 * This is both the correct aux to read DSC_CAPABILITY and the
 * correct aux to write DSC_ENABLED.
 *
 * This operation can be expensive (up to four aux reads), so
 * the caller should cache the return.
 *
 * Returns:
 * NULL if DSC cannot be enabled on this port, otherwise the aux device
 */
struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *immediate_upstream_port;
	struct drm_dp_mst_port *fec_port;
	struct drm_dp_desc desc = {};
	u8 endpoint_fec;
	u8 endpoint_dsc;

	if (!port)
		return NULL;

	if (port->parent->port_parent)
		immediate_upstream_port = port->parent->port_parent;
	else
		immediate_upstream_port = NULL;

	fec_port = immediate_upstream_port;
	while (fec_port) {
		/*
		 * Each physical link (i.e. not a virtual port) between the
		 * output and the primary device must support FEC
		 */
		if (!drm_dp_mst_is_virtual_dpcd(fec_port) &&
		    !fec_port->fec_capable)
			return NULL;

		fec_port = fec_port->parent->port_parent;
	}

	/* DP-to-DP peer device */
	if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
		u8 upstream_dsc;

		if (drm_dp_dpcd_read(&port->aux,
				     DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
			return NULL;
		if (drm_dp_dpcd_read(&port->aux,
				     DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
			return NULL;
		if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
				     DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
			return NULL;

		/* Endpoint decompression with DP-to-DP peer device */
		if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
		    (endpoint_fec & DP_FEC_CAPABLE) &&
		    (upstream_dsc & 0x2) /* DSC passthrough */)
			return &port->aux;

		/* Virtual DPCD decompression with DP-to-DP peer device */
		return &immediate_upstream_port->aux;
	}

	/* Virtual DPCD decompression with DP-to-HDMI or Virtual DP Sink */
	if (drm_dp_mst_is_virtual_dpcd(port))
		return &port->aux;

	/*
	 * Synaptics quirk
	 * Applies to ports for which:
	 * - Physical aux has Synaptics OUI
	 * - DPv1.4 or higher
	 * - Port is on primary branch device
	 * - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG)
	 */
	if (drm_dp_read_desc(port->mgr->aux, &desc, true))
		return NULL;

	if (drm_dp_has_quirk(&desc, 0,
			     DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
	    port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
	    port->parent == port->mgr->mst_primary) {
		u8 downstreamport;

		if (drm_dp_dpcd_read(&port->aux, DP_DOWNSTREAMPORT_PRESENT,
				     &downstreamport, 1) < 0)
			return NULL;

		if ((downstreamport & DP_DWN_STRM_PORT_PRESENT) &&
		    ((downstreamport & DP_DWN_STRM_PORT_TYPE_MASK)
		     != DP_DWN_STRM_PORT_TYPE_ANALOG))
			return port->mgr->aux;
	}

	/*
	 * The check below verifies if the MST sink
	 * connected to the GPU is capable of DSC -
	 * therefore the endpoint needs to be
	 * both DSC and FEC capable.
	 */
	if (drm_dp_dpcd_read(&port->aux,
			     DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
		return NULL;
	if (drm_dp_dpcd_read(&port->aux,
			     DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
		return NULL;
	if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
	    (endpoint_fec & DP_FEC_CAPABLE))
		return &port->aux;

	return NULL;
}
EXPORT_SYMBOL(drm_dp_mst_dsc_aux_for_port);
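
/*
 * Example (illustrative sketch): since finding the DSC aux can take several
 * aux reads, drivers usually resolve it once when the connector is detected
 * and cache it for later DSC_ENABLED writes. "example_connector" and its
 * "dsc_aux" field are hypothetical driver members.
 *
 *	example_connector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
 *	...
 *	if (example_connector->dsc_aux)
 *		drm_dp_dpcd_writeb(example_connector->dsc_aux,
 *				   DP_DSC_ENABLE, DP_DECOMPRESSION_EN);
 */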