23#include <linux/delay.h>
24#include <linux/errno.h>
25#include <linux/i2c.h>
26#include <linux/init.h>
27#include <linux/kernel.h>
28#include <linux/sched.h>
29#include <linux/seq_file.h>
30#include <linux/iopoll.h>
31
32#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
33#include <linux/stacktrace.h>
34#include <linux/sort.h>
35#include <linux/timekeeping.h>
36#include <linux/math64.h>
37#endif
38
39#include <drm/drm_atomic.h>
40#include <drm/drm_atomic_helper.h>
41#include <drm/drm_dp_mst_helper.h>
42#include <drm/drm_drv.h>
43#include <drm/drm_print.h>
44#include <drm/drm_probe_helper.h>
45
46#include "drm_crtc_helper_internal.h"
47#include "drm_dp_mst_topology_internal.h"
48
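/**
 * DOC: dp mst helper
 *
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband msgs.
 */
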
56struct drm_dp_pending_up_req {
57 struct drm_dp_sideband_msg_hdr hdr;
58 struct drm_dp_sideband_msg_req_body msg;
59 struct list_head next;
60};
61
62static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
63 char *buf);
64
65static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);
66
67static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
68 int id,
69 struct drm_dp_payload *payload);
70
71static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
72 struct drm_dp_mst_port *port,
73 int offset, int size, u8 *bytes);
74static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
75 struct drm_dp_mst_port *port,
76 int offset, int size, u8 *bytes);
77
78static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
79 struct drm_dp_mst_branch *mstb);
80
81static void
82drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
83 struct drm_dp_mst_branch *mstb);
84
85static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
86 struct drm_dp_mst_branch *mstb,
87 struct drm_dp_mst_port *port);
88static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
89 u8 *guid);
90
91static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port);
92static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port);
93static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
94
95#define DBG_PREFIX "[dp_mst]"
96
97#define DP_STR(x) [DP_ ## x] = #x
98
99static const char *drm_dp_mst_req_type_str(u8 req_type)
100{
101 static const char * const req_type_str[] = {
102 DP_STR(GET_MSG_TRANSACTION_VERSION),
103 DP_STR(LINK_ADDRESS),
104 DP_STR(CONNECTION_STATUS_NOTIFY),
105 DP_STR(ENUM_PATH_RESOURCES),
106 DP_STR(ALLOCATE_PAYLOAD),
107 DP_STR(QUERY_PAYLOAD),
108 DP_STR(RESOURCE_STATUS_NOTIFY),
109 DP_STR(CLEAR_PAYLOAD_ID_TABLE),
110 DP_STR(REMOTE_DPCD_READ),
111 DP_STR(REMOTE_DPCD_WRITE),
112 DP_STR(REMOTE_I2C_READ),
113 DP_STR(REMOTE_I2C_WRITE),
114 DP_STR(POWER_UP_PHY),
115 DP_STR(POWER_DOWN_PHY),
116 DP_STR(SINK_EVENT_NOTIFY),
117 DP_STR(QUERY_STREAM_ENC_STATUS),
118 };
119
120 if (req_type >= ARRAY_SIZE(req_type_str) ||
121 !req_type_str[req_type])
122 return "unknown";
123
124 return req_type_str[req_type];
125}
126
127#undef DP_STR
128#define DP_STR(x) [DP_NAK_ ## x] = #x
129
130static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
131{
132 static const char * const nak_reason_str[] = {
133 DP_STR(WRITE_FAILURE),
134 DP_STR(INVALID_READ),
135 DP_STR(CRC_FAILURE),
136 DP_STR(BAD_PARAM),
137 DP_STR(DEFER),
138 DP_STR(LINK_FAILURE),
139 DP_STR(NO_RESOURCES),
140 DP_STR(DPCD_FAIL),
141 DP_STR(I2C_NAK),
142 DP_STR(ALLOCATE_FAIL),
143 };
144
145 if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
146 !nak_reason_str[nak_reason])
147 return "unknown";
148
149 return nak_reason_str[nak_reason];
150}
151
152#undef DP_STR
153#define DP_STR(x) [DRM_DP_SIDEBAND_TX_ ## x] = #x
154
155static const char *drm_dp_mst_sideband_tx_state_str(int state)
156{
157 static const char * const sideband_reason_str[] = {
158 DP_STR(QUEUED),
159 DP_STR(START_SEND),
160 DP_STR(SENT),
161 DP_STR(RX),
162 DP_STR(TIMEOUT),
163 };
164
165 if (state >= ARRAY_SIZE(sideband_reason_str) ||
166 !sideband_reason_str[state])
167 return "unknown";
168
169 return sideband_reason_str[state];
170}
171
172static int
173drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
174{
175 int i;
176 u8 unpacked_rad[16];
177
178 for (i = 0; i < lct; i++) {
179 if (i % 2)
180 unpacked_rad[i] = rad[i / 2] >> 4;
181 else
182 unpacked_rad[i] = rad[i / 2] & BIT_MASK(4);
183 }
184
185
186
187
188 return snprintf(out, len, "%*phC", lct, unpacked_rad);
189}
190
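/* sideband msg handling */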
192static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
193{
194 u8 bitmask = 0x80;
195 u8 bitshift = 7;
196 u8 array_index = 0;
197 int number_of_bits = num_nibbles * 4;
198 u8 remainder = 0;
199
200 while (number_of_bits != 0) {
201 number_of_bits--;
202 remainder <<= 1;
203 remainder |= (data[array_index] & bitmask) >> bitshift;
204 bitmask >>= 1;
205 bitshift--;
206 if (bitmask == 0) {
207 bitmask = 0x80;
208 bitshift = 7;
209 array_index++;
210 }
211 if ((remainder & 0x10) == 0x10)
212 remainder ^= 0x13;
213 }
214
215 number_of_bits = 4;
216 while (number_of_bits != 0) {
217 number_of_bits--;
218 remainder <<= 1;
219 if ((remainder & 0x10) != 0)
220 remainder ^= 0x13;
221 }
222
223 return remainder;
224}
225
226static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
227{
228 u8 bitmask = 0x80;
229 u8 bitshift = 7;
230 u8 array_index = 0;
231 int number_of_bits = number_of_bytes * 8;
232 u16 remainder = 0;
233
234 while (number_of_bits != 0) {
235 number_of_bits--;
236 remainder <<= 1;
237 remainder |= (data[array_index] & bitmask) >> bitshift;
238 bitmask >>= 1;
239 bitshift--;
240 if (bitmask == 0) {
241 bitmask = 0x80;
242 bitshift = 7;
243 array_index++;
244 }
245 if ((remainder & 0x100) == 0x100)
246 remainder ^= 0xd5;
247 }
248
249 number_of_bits = 8;
250 while (number_of_bits != 0) {
251 number_of_bits--;
252 remainder <<= 1;
253 if ((remainder & 0x100) != 0)
254 remainder ^= 0xd5;
255 }
256
257 return remainder & 0xff;
258}
259static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
260{
261 u8 size = 3;
262
263 size += (hdr->lct / 2);
264 return size;
265}
266
267static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
268 u8 *buf, int *len)
269{
270 int idx = 0;
271 int i;
272 u8 crc4;
273
274 buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
275 for (i = 0; i < (hdr->lct / 2); i++)
276 buf[idx++] = hdr->rad[i];
277 buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
278 (hdr->msg_len & 0x3f);
279 buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);
280
281 crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
282 buf[idx - 1] |= (crc4 & 0xf);
283
284 *len = idx;
285}
286
287static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
288 u8 *buf, int buflen, u8 *hdrlen)
289{
290 u8 crc4;
291 u8 len;
292 int i;
293 u8 idx;
294
295 if (buf[0] == 0)
296 return false;
297 len = 3;
298 len += ((buf[0] & 0xf0) >> 4) / 2;
299 if (len > buflen)
300 return false;
301 crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);
302
303 if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
304 DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
305 return false;
306 }
307
308 hdr->lct = (buf[0] & 0xf0) >> 4;
309 hdr->lcr = (buf[0] & 0xf);
310 idx = 1;
311 for (i = 0; i < (hdr->lct / 2); i++)
312 hdr->rad[i] = buf[idx++];
313 hdr->broadcast = (buf[idx] >> 7) & 0x1;
314 hdr->path_msg = (buf[idx] >> 6) & 0x1;
315 hdr->msg_len = buf[idx] & 0x3f;
316 idx++;
317 hdr->somt = (buf[idx] >> 7) & 0x1;
318 hdr->eomt = (buf[idx] >> 6) & 0x1;
319 hdr->seqno = (buf[idx] >> 4) & 0x1;
320 idx++;
321 *hdrlen = idx;
322 return true;
323}
324
325void
326drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
327 struct drm_dp_sideband_msg_tx *raw)
328{
329 int idx = 0;
330 int i;
331 u8 *buf = raw->msg;
332
333 buf[idx++] = req->req_type & 0x7f;
334
335 switch (req->req_type) {
336 case DP_ENUM_PATH_RESOURCES:
337 case DP_POWER_DOWN_PHY:
338 case DP_POWER_UP_PHY:
339 buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
340 idx++;
341 break;
342 case DP_ALLOCATE_PAYLOAD:
343 buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
344 (req->u.allocate_payload.number_sdp_streams & 0xf);
345 idx++;
346 buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
347 idx++;
348 buf[idx] = (req->u.allocate_payload.pbn >> 8);
349 idx++;
350 buf[idx] = (req->u.allocate_payload.pbn & 0xff);
351 idx++;
352 for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
353 buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
354 (req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
355 idx++;
356 }
357 if (req->u.allocate_payload.number_sdp_streams & 1) {
358 i = req->u.allocate_payload.number_sdp_streams - 1;
359 buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
360 idx++;
361 }
362 break;
363 case DP_QUERY_PAYLOAD:
364 buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
365 idx++;
366 buf[idx] = (req->u.query_payload.vcpi & 0x7f);
367 idx++;
368 break;
369 case DP_REMOTE_DPCD_READ:
370 buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
371 buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
372 idx++;
373 buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
374 idx++;
375 buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
376 idx++;
377 buf[idx] = (req->u.dpcd_read.num_bytes);
378 idx++;
379 break;
380
381 case DP_REMOTE_DPCD_WRITE:
382 buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
383 buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
384 idx++;
385 buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
386 idx++;
387 buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
388 idx++;
389 buf[idx] = (req->u.dpcd_write.num_bytes);
390 idx++;
391 memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
392 idx += req->u.dpcd_write.num_bytes;
393 break;
394 case DP_REMOTE_I2C_READ:
395 buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
396 buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
397 idx++;
398 for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
399 buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
400 idx++;
401 buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
402 idx++;
403 memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
404 idx += req->u.i2c_read.transactions[i].num_bytes;
405
406 buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
407 buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
408 idx++;
409 }
410 buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
411 idx++;
412 buf[idx] = (req->u.i2c_read.num_bytes_read);
413 idx++;
414 break;
415
416 case DP_REMOTE_I2C_WRITE:
417 buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
418 idx++;
419 buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
420 idx++;
421 buf[idx] = (req->u.i2c_write.num_bytes);
422 idx++;
423 memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
424 idx += req->u.i2c_write.num_bytes;
425 break;
426 }
427 raw->cur_len = idx;
428}
429EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_encode_sideband_req);
430
431
432int
433drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
434 struct drm_dp_sideband_msg_req_body *req)
435{
436 const u8 *buf = raw->msg;
437 int i, idx = 0;
438
439 req->req_type = buf[idx++] & 0x7f;
440 switch (req->req_type) {
441 case DP_ENUM_PATH_RESOURCES:
442 case DP_POWER_DOWN_PHY:
443 case DP_POWER_UP_PHY:
444 req->u.port_num.port_number = (buf[idx] >> 4) & 0xf;
445 break;
446 case DP_ALLOCATE_PAYLOAD:
447 {
448 struct drm_dp_allocate_payload *a =
449 &req->u.allocate_payload;
450
451 a->number_sdp_streams = buf[idx] & 0xf;
452 a->port_number = (buf[idx] >> 4) & 0xf;
453
454 WARN_ON(buf[++idx] & 0x80);
455 a->vcpi = buf[idx] & 0x7f;
456
457 a->pbn = buf[++idx] << 8;
458 a->pbn |= buf[++idx];
459
460 idx++;
461 for (i = 0; i < a->number_sdp_streams; i++) {
462 a->sdp_stream_sink[i] =
463 (buf[idx + (i / 2)] >> ((i % 2) ? 0 : 4)) & 0xf;
464 }
465 }
466 break;
467 case DP_QUERY_PAYLOAD:
468 req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf;
469 WARN_ON(buf[++idx] & 0x80);
470 req->u.query_payload.vcpi = buf[idx] & 0x7f;
471 break;
472 case DP_REMOTE_DPCD_READ:
473 {
474 struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read;
475
476 r->port_number = (buf[idx] >> 4) & 0xf;
477
478 r->dpcd_address = (buf[idx] << 16) & 0xf0000;
479 r->dpcd_address |= (buf[++idx] << 8) & 0xff00;
480 r->dpcd_address |= buf[++idx] & 0xff;
481
482 r->num_bytes = buf[++idx];
483 }
484 break;
485 case DP_REMOTE_DPCD_WRITE:
486 {
487 struct drm_dp_remote_dpcd_write *w =
488 &req->u.dpcd_write;
489
490 w->port_number = (buf[idx] >> 4) & 0xf;
491
492 w->dpcd_address = (buf[idx] << 16) & 0xf0000;
493 w->dpcd_address |= (buf[++idx] << 8) & 0xff00;
494 w->dpcd_address |= buf[++idx] & 0xff;
495
496 w->num_bytes = buf[++idx];
497
498 w->bytes = kmemdup(&buf[++idx], w->num_bytes,
499 GFP_KERNEL);
500 if (!w->bytes)
501 return -ENOMEM;
502 }
503 break;
504 case DP_REMOTE_I2C_READ:
505 {
506 struct drm_dp_remote_i2c_read *r = &req->u.i2c_read;
507 struct drm_dp_remote_i2c_read_tx *tx;
508 bool failed = false;
509
510 r->num_transactions = buf[idx] & 0x3;
511 r->port_number = (buf[idx] >> 4) & 0xf;
512 for (i = 0; i < r->num_transactions; i++) {
513 tx = &r->transactions[i];
514
515 tx->i2c_dev_id = buf[++idx] & 0x7f;
516 tx->num_bytes = buf[++idx];
517 tx->bytes = kmemdup(&buf[++idx],
518 tx->num_bytes,
519 GFP_KERNEL);
520 if (!tx->bytes) {
521 failed = true;
522 break;
523 }
524 idx += tx->num_bytes;
525 tx->no_stop_bit = (buf[idx] >> 5) & 0x1;
526 tx->i2c_transaction_delay = buf[idx] & 0xf;
527 }
528
529 if (failed) {
530 for (i = 0; i < r->num_transactions; i++) {
531 tx = &r->transactions[i];
532 kfree(tx->bytes);
533 }
534 return -ENOMEM;
535 }
536
537 r->read_i2c_device_id = buf[++idx] & 0x7f;
538 r->num_bytes_read = buf[++idx];
539 }
540 break;
541 case DP_REMOTE_I2C_WRITE:
542 {
543 struct drm_dp_remote_i2c_write *w = &req->u.i2c_write;
544
545 w->port_number = (buf[idx] >> 4) & 0xf;
546 w->write_i2c_device_id = buf[++idx] & 0x7f;
547 w->num_bytes = buf[++idx];
548 w->bytes = kmemdup(&buf[++idx], w->num_bytes,
549 GFP_KERNEL);
550 if (!w->bytes)
551 return -ENOMEM;
552 }
553 break;
554 }
555
556 return 0;
557}
558EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_decode_sideband_req);
559
560void
561drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
562 int indent, struct drm_printer *printer)
563{
564 int i;
565
566#define P(f, ...) drm_printf_indent(printer, indent, f, ##__VA_ARGS__)
567 if (req->req_type == DP_LINK_ADDRESS) {
568
569 P("type=%s\n", drm_dp_mst_req_type_str(req->req_type));
570 return;
571 }
572
573 P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type));
574 indent++;
575
576 switch (req->req_type) {
577 case DP_ENUM_PATH_RESOURCES:
578 case DP_POWER_DOWN_PHY:
579 case DP_POWER_UP_PHY:
580 P("port=%d\n", req->u.port_num.port_number);
581 break;
582 case DP_ALLOCATE_PAYLOAD:
583 P("port=%d vcpi=%d pbn=%d sdp_streams=%d %*ph\n",
584 req->u.allocate_payload.port_number,
585 req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn,
586 req->u.allocate_payload.number_sdp_streams,
587 req->u.allocate_payload.number_sdp_streams,
588 req->u.allocate_payload.sdp_stream_sink);
589 break;
590 case DP_QUERY_PAYLOAD:
591 P("port=%d vcpi=%d\n",
592 req->u.query_payload.port_number,
593 req->u.query_payload.vcpi);
594 break;
595 case DP_REMOTE_DPCD_READ:
596 P("port=%d dpcd_addr=%05x len=%d\n",
597 req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address,
598 req->u.dpcd_read.num_bytes);
599 break;
600 case DP_REMOTE_DPCD_WRITE:
601 P("port=%d addr=%05x len=%d: %*ph\n",
602 req->u.dpcd_write.port_number,
603 req->u.dpcd_write.dpcd_address,
604 req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes,
605 req->u.dpcd_write.bytes);
606 break;
607 case DP_REMOTE_I2C_READ:
608 P("port=%d num_tx=%d id=%d size=%d:\n",
609 req->u.i2c_read.port_number,
610 req->u.i2c_read.num_transactions,
611 req->u.i2c_read.read_i2c_device_id,
612 req->u.i2c_read.num_bytes_read);
613
614 indent++;
615 for (i = 0; i < req->u.i2c_read.num_transactions; i++) {
616 const struct drm_dp_remote_i2c_read_tx *rtx =
617 &req->u.i2c_read.transactions[i];
618
619 P("%d: id=%03d size=%03d no_stop_bit=%d tx_delay=%03d: %*ph\n",
620 i, rtx->i2c_dev_id, rtx->num_bytes,
621 rtx->no_stop_bit, rtx->i2c_transaction_delay,
622 rtx->num_bytes, rtx->bytes);
623 }
624 break;
625 case DP_REMOTE_I2C_WRITE:
626 P("port=%d id=%d size=%d: %*ph\n",
627 req->u.i2c_write.port_number,
628 req->u.i2c_write.write_i2c_device_id,
629 req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
630 req->u.i2c_write.bytes);
631 break;
632 default:
633 P("???\n");
634 break;
635 }
636#undef P
637}
638EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_dump_sideband_msg_req_body);
639
640static inline void
641drm_dp_mst_dump_sideband_msg_tx(struct drm_printer *p,
642 const struct drm_dp_sideband_msg_tx *txmsg)
643{
644 struct drm_dp_sideband_msg_req_body req;
645 char buf[64];
646 int ret;
647 int i;
648
649 drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf,
650 sizeof(buf));
651 drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s\n",
652 txmsg->cur_offset, txmsg->cur_len, txmsg->seqno,
653 drm_dp_mst_sideband_tx_state_str(txmsg->state),
654 txmsg->path_msg, buf);
655
656 ret = drm_dp_decode_sideband_req(txmsg, &req);
657 if (ret) {
658 drm_printf(p, "<failed to decode sideband req: %d>\n", ret);
659 return;
660 }
661 drm_dp_dump_sideband_msg_req_body(&req, 1, p);
662
663 switch (req.req_type) {
664 case DP_REMOTE_DPCD_WRITE:
665 kfree(req.u.dpcd_write.bytes);
666 break;
667 case DP_REMOTE_I2C_READ:
668 for (i = 0; i < req.u.i2c_read.num_transactions; i++)
669 kfree(req.u.i2c_read.transactions[i].bytes);
670 break;
671 case DP_REMOTE_I2C_WRITE:
672 kfree(req.u.i2c_write.bytes);
673 break;
674 }
675}
676
677static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
678{
679 u8 crc4;
680
681 crc4 = drm_dp_msg_data_crc4(msg, len);
682 msg[len] = crc4;
683}
684
685static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
686 struct drm_dp_sideband_msg_tx *raw)
687{
688 int idx = 0;
689 u8 *buf = raw->msg;
690
691 buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);
692
693 raw->cur_len = idx;
694}
695
static bool drm_dp_sideband_msg_set_header(struct drm_dp_sideband_msg_rx *msg,
697 struct drm_dp_sideband_msg_hdr *hdr,
698 u8 hdrlen)
699{
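	/*
	 * ignore out-of-order messages or messages that are part of a
	 * failed transaction
	 */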
704 if (!hdr->somt && !msg->have_somt)
705 return false;
706
707
708 msg->curchunk_idx = 0;
709 msg->curchunk_len = hdr->msg_len;
710 msg->curchunk_hdrlen = hdrlen;
711
712
713 if (hdr->somt && msg->have_somt)
714 return false;
715
716 if (hdr->somt) {
717 memcpy(&msg->initial_hdr, hdr,
718 sizeof(struct drm_dp_sideband_msg_hdr));
719 msg->have_somt = true;
720 }
721 if (hdr->eomt)
722 msg->have_eomt = true;
723
724 return true;
725}
726
727
728static bool drm_dp_sideband_append_payload(struct drm_dp_sideband_msg_rx *msg,
729 u8 *replybuf, u8 replybuflen)
730{
731 u8 crc4;
732
733 memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
734 msg->curchunk_idx += replybuflen;
735
736 if (msg->curchunk_idx >= msg->curchunk_len) {
737
738 crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
739 if (crc4 != msg->chunk[msg->curchunk_len - 1])
740 print_hex_dump(KERN_DEBUG, "wrong crc",
741 DUMP_PREFIX_NONE, 16, 1,
742 msg->chunk, msg->curchunk_len, false);
743
744 memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
745 msg->curlen += msg->curchunk_len - 1;
746 }
747 return true;
748}
749
750static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
751 struct drm_dp_sideband_msg_reply_body *repmsg)
752{
753 int idx = 1;
754 int i;
755
756 memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
757 idx += 16;
758 repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
759 idx++;
760 if (idx > raw->curlen)
761 goto fail_len;
762 for (i = 0; i < repmsg->u.link_addr.nports; i++) {
763 if (raw->msg[idx] & 0x80)
764 repmsg->u.link_addr.ports[i].input_port = 1;
765
766 repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
767 repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);
768
769 idx++;
770 if (idx > raw->curlen)
771 goto fail_len;
772 repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
773 repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
774 if (repmsg->u.link_addr.ports[i].input_port == 0)
775 repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
776 idx++;
777 if (idx > raw->curlen)
778 goto fail_len;
779 if (repmsg->u.link_addr.ports[i].input_port == 0) {
780 repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
781 idx++;
782 if (idx > raw->curlen)
783 goto fail_len;
784 memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
785 idx += 16;
786 if (idx > raw->curlen)
787 goto fail_len;
788 repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
789 repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
790 idx++;
791
792 }
793 if (idx > raw->curlen)
794 goto fail_len;
795 }
796
797 return true;
798fail_len:
799 DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
800 return false;
801}
802
803static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
804 struct drm_dp_sideband_msg_reply_body *repmsg)
805{
806 int idx = 1;
807
808 repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
809 idx++;
810 if (idx > raw->curlen)
811 goto fail_len;
812 repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
813 idx++;
814 if (idx > raw->curlen)
815 goto fail_len;
816
817 memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
818 return true;
819fail_len:
820 DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
821 return false;
822}
823
824static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
825 struct drm_dp_sideband_msg_reply_body *repmsg)
826{
827 int idx = 1;
828
829 repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
830 idx++;
831 if (idx > raw->curlen)
832 goto fail_len;
833 return true;
834fail_len:
835 DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
836 return false;
837}
838
839static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
840 struct drm_dp_sideband_msg_reply_body *repmsg)
841{
842 int idx = 1;
843
844 repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
845 idx++;
846 if (idx > raw->curlen)
847 goto fail_len;
848 repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
849 idx++;
850
851 memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
852 return true;
853fail_len:
854 DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
855 return false;
856}
857
858static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
859 struct drm_dp_sideband_msg_reply_body *repmsg)
860{
861 int idx = 1;
862
863 repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
864 repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1;
865 idx++;
866 if (idx > raw->curlen)
867 goto fail_len;
868 repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
869 idx += 2;
870 if (idx > raw->curlen)
871 goto fail_len;
872 repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
873 idx += 2;
874 if (idx > raw->curlen)
875 goto fail_len;
876 return true;
877fail_len:
878 DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
879 return false;
880}
881
882static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
883 struct drm_dp_sideband_msg_reply_body *repmsg)
884{
885 int idx = 1;
886
887 repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
888 idx++;
889 if (idx > raw->curlen)
890 goto fail_len;
891 repmsg->u.allocate_payload.vcpi = raw->msg[idx];
892 idx++;
893 if (idx > raw->curlen)
894 goto fail_len;
895 repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
896 idx += 2;
897 if (idx > raw->curlen)
898 goto fail_len;
899 return true;
900fail_len:
901 DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
902 return false;
903}
904
905static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
906 struct drm_dp_sideband_msg_reply_body *repmsg)
907{
908 int idx = 1;
909
910 repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
911 idx++;
912 if (idx > raw->curlen)
913 goto fail_len;
914 repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
915 idx += 2;
916 if (idx > raw->curlen)
917 goto fail_len;
918 return true;
919fail_len:
920 DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
921 return false;
922}
923
924static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
925 struct drm_dp_sideband_msg_reply_body *repmsg)
926{
927 int idx = 1;
928
929 repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
930 idx++;
931 if (idx > raw->curlen) {
932 DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
933 idx, raw->curlen);
934 return false;
935 }
936 return true;
937}
938
939static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
940 struct drm_dp_sideband_msg_reply_body *msg)
941{
942 memset(msg, 0, sizeof(*msg));
943 msg->reply_type = (raw->msg[0] & 0x80) >> 7;
944 msg->req_type = (raw->msg[0] & 0x7f);
945
946 if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
947 memcpy(msg->u.nak.guid, &raw->msg[1], 16);
948 msg->u.nak.reason = raw->msg[17];
949 msg->u.nak.nak_data = raw->msg[18];
950 return false;
951 }
952
953 switch (msg->req_type) {
954 case DP_LINK_ADDRESS:
955 return drm_dp_sideband_parse_link_address(raw, msg);
956 case DP_QUERY_PAYLOAD:
957 return drm_dp_sideband_parse_query_payload_ack(raw, msg);
958 case DP_REMOTE_DPCD_READ:
959 return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
960 case DP_REMOTE_DPCD_WRITE:
961 return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
962 case DP_REMOTE_I2C_READ:
963 return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
964 case DP_ENUM_PATH_RESOURCES:
965 return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
966 case DP_ALLOCATE_PAYLOAD:
967 return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
968 case DP_POWER_DOWN_PHY:
969 case DP_POWER_UP_PHY:
970 return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
971 case DP_CLEAR_PAYLOAD_ID_TABLE:
972 return true;
973 default:
974 DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
975 drm_dp_mst_req_type_str(msg->req_type));
976 return false;
977 }
978}
979
980static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
981 struct drm_dp_sideband_msg_req_body *msg)
982{
983 int idx = 1;
984
985 msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
986 idx++;
987 if (idx > raw->curlen)
988 goto fail_len;
989
990 memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
991 idx += 16;
992 if (idx > raw->curlen)
993 goto fail_len;
994
995 msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
996 msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
997 msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
998 msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
999 msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
1000 idx++;
1001 return true;
1002fail_len:
1003 DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
1004 return false;
1005}
1006
1007static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
1008 struct drm_dp_sideband_msg_req_body *msg)
1009{
1010 int idx = 1;
1011
1012 msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
1013 idx++;
1014 if (idx > raw->curlen)
1015 goto fail_len;
1016
1017 memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
1018 idx += 16;
1019 if (idx > raw->curlen)
1020 goto fail_len;
1021
1022 msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
1023 idx++;
1024 return true;
1025fail_len:
1026 DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
1027 return false;
1028}
1029
1030static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
1031 struct drm_dp_sideband_msg_req_body *msg)
1032{
1033 memset(msg, 0, sizeof(*msg));
1034 msg->req_type = (raw->msg[0] & 0x7f);
1035
1036 switch (msg->req_type) {
1037 case DP_CONNECTION_STATUS_NOTIFY:
1038 return drm_dp_sideband_parse_connection_status_notify(raw, msg);
1039 case DP_RESOURCE_STATUS_NOTIFY:
1040 return drm_dp_sideband_parse_resource_status_notify(raw, msg);
1041 default:
1042 DRM_ERROR("Got unknown request 0x%02x (%s)\n", msg->req_type,
1043 drm_dp_mst_req_type_str(msg->req_type));
1044 return false;
1045 }
1046}
1047
1048static void build_dpcd_write(struct drm_dp_sideband_msg_tx *msg,
1049 u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
1050{
1051 struct drm_dp_sideband_msg_req_body req;
1052
1053 req.req_type = DP_REMOTE_DPCD_WRITE;
1054 req.u.dpcd_write.port_number = port_num;
1055 req.u.dpcd_write.dpcd_address = offset;
1056 req.u.dpcd_write.num_bytes = num_bytes;
1057 req.u.dpcd_write.bytes = bytes;
1058 drm_dp_encode_sideband_req(&req, msg);
1059}
1060
1061static void build_link_address(struct drm_dp_sideband_msg_tx *msg)
1062{
1063 struct drm_dp_sideband_msg_req_body req;
1064
1065 req.req_type = DP_LINK_ADDRESS;
1066 drm_dp_encode_sideband_req(&req, msg);
1067}
1068
1069static void build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
1070{
1071 struct drm_dp_sideband_msg_req_body req;
1072
1073 req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
1074 drm_dp_encode_sideband_req(&req, msg);
1075}
1076
1077static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
1078 int port_num)
1079{
1080 struct drm_dp_sideband_msg_req_body req;
1081
1082 req.req_type = DP_ENUM_PATH_RESOURCES;
1083 req.u.port_num.port_number = port_num;
1084 drm_dp_encode_sideband_req(&req, msg);
1085 msg->path_msg = true;
1086 return 0;
1087}
1088
1089static void build_allocate_payload(struct drm_dp_sideband_msg_tx *msg,
1090 int port_num,
1091 u8 vcpi, uint16_t pbn,
1092 u8 number_sdp_streams,
1093 u8 *sdp_stream_sink)
1094{
1095 struct drm_dp_sideband_msg_req_body req;
1096
1097 memset(&req, 0, sizeof(req));
1098 req.req_type = DP_ALLOCATE_PAYLOAD;
1099 req.u.allocate_payload.port_number = port_num;
1100 req.u.allocate_payload.vcpi = vcpi;
1101 req.u.allocate_payload.pbn = pbn;
1102 req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
1103 memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
1104 number_sdp_streams);
1105 drm_dp_encode_sideband_req(&req, msg);
1106 msg->path_msg = true;
1107}
1108
1109static void build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
1110 int port_num, bool power_up)
1111{
1112 struct drm_dp_sideband_msg_req_body req;
1113
1114 if (power_up)
1115 req.req_type = DP_POWER_UP_PHY;
1116 else
1117 req.req_type = DP_POWER_DOWN_PHY;
1118
1119 req.u.port_num.port_number = port_num;
1120 drm_dp_encode_sideband_req(&req, msg);
1121 msg->path_msg = true;
1122}
1123
1124static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
1125 struct drm_dp_vcpi *vcpi)
1126{
1127 int ret, vcpi_ret;
1128
1129 mutex_lock(&mgr->payload_lock);
1130 ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
1131 if (ret > mgr->max_payloads) {
1132 ret = -EINVAL;
1133 DRM_DEBUG_KMS("out of payload ids %d\n", ret);
1134 goto out_unlock;
1135 }
1136
1137 vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
1138 if (vcpi_ret > mgr->max_payloads) {
1139 ret = -EINVAL;
1140 DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
1141 goto out_unlock;
1142 }
1143
1144 set_bit(ret, &mgr->payload_mask);
1145 set_bit(vcpi_ret, &mgr->vcpi_mask);
1146 vcpi->vcpi = vcpi_ret + 1;
1147 mgr->proposed_vcpis[ret - 1] = vcpi;
1148out_unlock:
1149 mutex_unlock(&mgr->payload_lock);
1150 return ret;
1151}
1152
1153static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
1154 int vcpi)
1155{
1156 int i;
1157
1158 if (vcpi == 0)
1159 return;
1160
1161 mutex_lock(&mgr->payload_lock);
1162 DRM_DEBUG_KMS("putting payload %d\n", vcpi);
1163 clear_bit(vcpi - 1, &mgr->vcpi_mask);
1164
1165 for (i = 0; i < mgr->max_payloads; i++) {
1166 if (mgr->proposed_vcpis[i] &&
1167 mgr->proposed_vcpis[i]->vcpi == vcpi) {
1168 mgr->proposed_vcpis[i] = NULL;
1169 clear_bit(i + 1, &mgr->payload_mask);
1170 }
1171 }
1172 mutex_unlock(&mgr->payload_lock);
1173}
1174
1175static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
1176 struct drm_dp_sideband_msg_tx *txmsg)
1177{
1178 unsigned int state;
1179
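	/*
	 * All updates to txmsg->state are protected by mgr->qlock, and the
	 * two cases we check here are terminal states. For those the barriers
	 * provided by the wake_up/wait_event pair are enough.
	 */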
1185 state = READ_ONCE(txmsg->state);
1186 return (state == DRM_DP_SIDEBAND_TX_RX ||
1187 state == DRM_DP_SIDEBAND_TX_TIMEOUT);
1188}
1189
1190static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
1191 struct drm_dp_sideband_msg_tx *txmsg)
1192{
1193 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
1194 unsigned long wait_timeout = msecs_to_jiffies(4000);
1195 unsigned long wait_expires = jiffies + wait_timeout;
1196 int ret;
1197
1198 for (;;) {
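		/*
		 * If the driver provides a poll_hpd_irq() callback, only wait
		 * in 50 msec steps and poll for the MST reply manually. This
		 * covers sinks whose HPD pulse for the sideband reply gets
		 * lost, so we don't time out even though the reply is already
		 * sitting in the DPRX waiting to be read.
		 */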
1212 ret = wait_event_timeout(mgr->tx_waitq,
1213 check_txmsg_state(mgr, txmsg),
1214 mgr->cbs->poll_hpd_irq ?
1215 msecs_to_jiffies(50) :
1216 wait_timeout);
1217
1218 if (ret || !mgr->cbs->poll_hpd_irq ||
1219 time_after(jiffies, wait_expires))
1220 break;
1221
1222 mgr->cbs->poll_hpd_irq(mgr);
1223 }
1224
1225 mutex_lock(&mgr->qlock);
1226 if (ret > 0) {
1227 if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
1228 ret = -EIO;
1229 goto out;
1230 }
1231 } else {
1232 DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);
1233
1234
1235 ret = -EIO;
1236
1237
1238 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
1239 txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
1240 txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
1241 list_del(&txmsg->next);
1242 }
1243out:
1244 if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
1245 struct drm_printer p = drm_debug_printer(DBG_PREFIX);
1246
1247 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
1248 }
1249 mutex_unlock(&mgr->qlock);
1250
1251 drm_dp_mst_kick_tx(mgr);
1252 return ret;
1253}
1254
1255static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
1256{
1257 struct drm_dp_mst_branch *mstb;
1258
1259 mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
1260 if (!mstb)
1261 return NULL;
1262
1263 mstb->lct = lct;
1264 if (lct > 1)
1265 memcpy(mstb->rad, rad, lct / 2);
1266 INIT_LIST_HEAD(&mstb->ports);
1267 kref_init(&mstb->topology_kref);
1268 kref_init(&mstb->malloc_kref);
1269 return mstb;
1270}
1271
1272static void drm_dp_free_mst_branch_device(struct kref *kref)
1273{
1274 struct drm_dp_mst_branch *mstb =
1275 container_of(kref, struct drm_dp_mst_branch, malloc_kref);
1276
1277 if (mstb->port_parent)
1278 drm_dp_mst_put_port_malloc(mstb->port_parent);
1279
1280 kfree(mstb);
1281}
1282
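/**
 * DOC: Branch device and port refcounting
 *
 * Both &struct drm_dp_mst_branch and &struct drm_dp_mst_port carry two
 * separate reference counts: a topology reference (topology_kref) and a
 * malloc reference (malloc_kref).
 *
 * Topology references track how long a branch device or port remains a
 * valid part of the currently probed topology. When the last topology
 * reference is dropped, the device is unlinked from the topology and its
 * teardown is handed to the delayed-destroy worker, but its memory is not
 * freed yet.
 *
 * Malloc references only keep the memory allocation itself alive, so that
 * pointers held elsewhere (for example in a driver's atomic state) stay
 * valid after the device has left the topology. The memory is finally
 * released when the last malloc reference is dropped.
 */

/**
 * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_branch.malloc_kref. When the refcount reaches 0,
 * the memory allocation for @mstb is released and @mstb may no longer be
 * used.
 *
 * See also: drm_dp_mst_put_mstb_malloc()
 */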
1382static void
1383drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
1384{
1385 kref_get(&mstb->malloc_kref);
1386 DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
1387}
1388
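/**
 * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_branch.malloc_kref. When the refcount reaches 0,
 * the memory allocation for @mstb is released and @mstb may no longer be
 * used.
 *
 * See also: drm_dp_mst_get_mstb_malloc()
 */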
1400static void
1401drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
1402{
1403 DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
1404 kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
1405}
1406
1407static void drm_dp_free_mst_port(struct kref *kref)
1408{
1409 struct drm_dp_mst_port *port =
1410 container_of(kref, struct drm_dp_mst_port, malloc_kref);
1411
1412 drm_dp_mst_put_mstb_malloc(port->parent);
1413 kfree(port);
1414}
1415
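/**
 * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_port.malloc_kref. When the refcount reaches 0, the
 * memory allocation for @port is released and @port may no longer be used.
 *
 * See also: drm_dp_mst_put_port_malloc()
 */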
1433void
1434drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
1435{
1436 kref_get(&port->malloc_kref);
1437 DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref));
1438}
1439EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);
1440
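/**
 * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_port.malloc_kref. When the refcount reaches 0, the
 * memory allocation for @port is released and @port may no longer be used.
 *
 * See also: drm_dp_mst_get_port_malloc()
 */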
1451void
1452drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
1453{
1454 DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
1455 kref_put(&port->malloc_kref, drm_dp_free_mst_port);
1456}
1457EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
1458
1459#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
1460
1461#define STACK_DEPTH 8
1462
1463static noinline void
1464__topology_ref_save(struct drm_dp_mst_topology_mgr *mgr,
1465 struct drm_dp_mst_topology_ref_history *history,
1466 enum drm_dp_mst_topology_ref_type type)
1467{
1468 struct drm_dp_mst_topology_ref_entry *entry = NULL;
1469 depot_stack_handle_t backtrace;
1470 ulong stack_entries[STACK_DEPTH];
1471 uint n;
1472 int i;
1473
1474 n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1);
1475 backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL);
1476 if (!backtrace)
1477 return;
1478
1479
1480 for (i = 0; i < history->len; i++) {
1481 if (history->entries[i].backtrace == backtrace) {
1482 entry = &history->entries[i];
1483 break;
1484 }
1485 }
1486
1487
1488 if (!entry) {
1489 struct drm_dp_mst_topology_ref_entry *new;
1490 int new_len = history->len + 1;
1491
1492 new = krealloc(history->entries, sizeof(*new) * new_len,
1493 GFP_KERNEL);
1494 if (!new)
1495 return;
1496
1497 entry = &new[history->len];
1498 history->len = new_len;
1499 history->entries = new;
1500
1501 entry->backtrace = backtrace;
1502 entry->type = type;
1503 entry->count = 0;
1504 }
1505 entry->count++;
1506 entry->ts_nsec = ktime_get_ns();
1507}
1508
1509static int
1510topology_ref_history_cmp(const void *a, const void *b)
1511{
1512 const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b;
1513
1514 if (entry_a->ts_nsec > entry_b->ts_nsec)
1515 return 1;
1516 else if (entry_a->ts_nsec < entry_b->ts_nsec)
1517 return -1;
1518 else
1519 return 0;
1520}
1521
1522static inline const char *
1523topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
1524{
1525 if (type == DRM_DP_MST_TOPOLOGY_REF_GET)
1526 return "get";
1527 else
1528 return "put";
1529}
1530
1531static void
1532__dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history,
1533 void *ptr, const char *type_str)
1534{
1535 struct drm_printer p = drm_debug_printer(DBG_PREFIX);
1536 char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
1537 int i;
1538
1539 if (!buf)
1540 return;
1541
1542 if (!history->len)
1543 goto out;
1544
1545
1546
1547
1548 sort(history->entries, history->len, sizeof(*history->entries),
1549 topology_ref_history_cmp, NULL);
1550
1551 drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n",
1552 type_str, ptr);
1553
1554 for (i = 0; i < history->len; i++) {
1555 const struct drm_dp_mst_topology_ref_entry *entry =
1556 &history->entries[i];
1557 ulong *entries;
1558 uint nr_entries;
1559 u64 ts_nsec = entry->ts_nsec;
1560 u32 rem_nsec = do_div(ts_nsec, 1000000000);
1561
1562 nr_entries = stack_depot_fetch(entry->backtrace, &entries);
1563 stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 4);
1564
1565 drm_printf(&p, " %d %ss (last at %5llu.%06u):\n%s",
1566 entry->count,
1567 topology_ref_type_to_str(entry->type),
1568 ts_nsec, rem_nsec / 1000, buf);
1569 }
1570
1571
1572 kfree(history->entries);
1573out:
1574 kfree(buf);
1575}
1576
1577static __always_inline void
1578drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
1579{
1580 __dump_topology_ref_history(&mstb->topology_ref_history, mstb,
1581 "MSTB");
1582}
1583
1584static __always_inline void
1585drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
1586{
1587 __dump_topology_ref_history(&port->topology_ref_history, port,
1588 "Port");
1589}
1590
1591static __always_inline void
1592save_mstb_topology_ref(struct drm_dp_mst_branch *mstb,
1593 enum drm_dp_mst_topology_ref_type type)
1594{
1595 __topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type);
1596}
1597
1598static __always_inline void
1599save_port_topology_ref(struct drm_dp_mst_port *port,
1600 enum drm_dp_mst_topology_ref_type type)
1601{
1602 __topology_ref_save(port->mgr, &port->topology_ref_history, type);
1603}
1604
1605static inline void
1606topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr)
1607{
1608 mutex_lock(&mgr->topology_ref_history_lock);
1609}
1610
1611static inline void
1612topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr)
1613{
1614 mutex_unlock(&mgr->topology_ref_history_lock);
1615}
1616#else
1617static inline void
1618topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {}
1619static inline void
1620topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {}
1621static inline void
1622drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {}
1623static inline void
1624drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
1625#define save_mstb_topology_ref(mstb, type)
1626#define save_port_topology_ref(port, type)
1627#endif
1628
1629static void drm_dp_destroy_mst_branch_device(struct kref *kref)
1630{
1631 struct drm_dp_mst_branch *mstb =
1632 container_of(kref, struct drm_dp_mst_branch, topology_kref);
1633 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
1634
1635 drm_dp_mst_dump_mstb_topology_history(mstb);
1636
1637 INIT_LIST_HEAD(&mstb->destroy_next);
1638
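	/*
	 * This can get called under mgr->mutex, so we need to perform the
	 * actual destruction of the mstb in another worker
	 */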
1643 mutex_lock(&mgr->delayed_destroy_lock);
1644 list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
1645 mutex_unlock(&mgr->delayed_destroy_lock);
1646 queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
1647}
1648
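/**
 * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
 * branch device unless it's zero
 * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Attempts to grab a topology reference to @mstb, failing if @mstb has
 * already been removed from the topology (i.e. its topology_kref has reached
 * 0). If the caller already holds a topology reference,
 * drm_dp_mst_topology_get_mstb() should be used instead.
 *
 * Returns:
 * * 1: A topology reference was grabbed successfully
 * * 0: @mstb is no longer in the topology, no reference was grabbed
 */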
1671static int __must_check
1672drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
1673{
1674 int ret;
1675
1676 topology_ref_history_lock(mstb->mgr);
1677 ret = kref_get_unless_zero(&mstb->topology_kref);
1678 if (ret) {
1679 DRM_DEBUG("mstb %p (%d)\n",
1680 mstb, kref_read(&mstb->topology_kref));
1681 save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
1682 }
1683
1684 topology_ref_history_unlock(mstb->mgr);
1685
1686 return ret;
1687}
1688
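/**
 * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
 * branch device
 * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Increments &drm_dp_mst_branch.topology_kref without checking whether it
 * has already reached 0. Only valid when the caller is guaranteed to already
 * hold at least one topology reference to @mstb; otherwise
 * drm_dp_mst_topology_try_get_mstb() must be used.
 */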
1703static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
1704{
1705 topology_ref_history_lock(mstb->mgr);
1706
1707 save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
1708 WARN_ON(kref_read(&mstb->topology_kref) == 0);
1709 kref_get(&mstb->topology_kref);
1710 DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
1711
1712 topology_ref_history_unlock(mstb->mgr);
1713}
1714
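/**
 * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to release the topology reference of
 *
 * Releases a topology reference from @mstb by decrementing
 * &drm_dp_mst_branch.topology_kref. When the refcount reaches 0,
 * drm_dp_destroy_mst_branch_device() is called.
 */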
1727static void
1728drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
1729{
1730 topology_ref_history_lock(mstb->mgr);
1731
1732 DRM_DEBUG("mstb %p (%d)\n",
1733 mstb, kref_read(&mstb->topology_kref) - 1);
1734 save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT);
1735
1736 topology_ref_history_unlock(mstb->mgr);
1737 kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
1738}
1739
1740static void drm_dp_destroy_port(struct kref *kref)
1741{
1742 struct drm_dp_mst_port *port =
1743 container_of(kref, struct drm_dp_mst_port, topology_kref);
1744 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
1745
1746 drm_dp_mst_dump_port_topology_history(port);
1747
1748
1749 if (port->input) {
1750 drm_dp_mst_put_port_malloc(port);
1751 return;
1752 }
1753
1754 kfree(port->cached_edid);
1755
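	/*
	 * we can't destroy the connector here, as we might be holding the
	 * mode_config.mutex from an EDID retrieval
	 */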
1760 mutex_lock(&mgr->delayed_destroy_lock);
1761 list_add(&port->next, &mgr->destroy_port_list);
1762 mutex_unlock(&mgr->delayed_destroy_lock);
1763 queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
1764}
1765
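/**
 * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
 * port unless it's zero
 * @port: &struct drm_dp_mst_port to increment the topology refcount of
 *
 * Attempts to grab a topology reference to @port, failing if @port has
 * already been removed from the topology (i.e. its topology_kref has reached
 * 0). If the caller already holds a topology reference,
 * drm_dp_mst_topology_get_port() should be used instead.
 *
 * Returns:
 * * 1: A topology reference was grabbed successfully
 * * 0: @port is no longer in the topology, no reference was grabbed
 */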
1788static int __must_check
1789drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
1790{
1791 int ret;
1792
1793 topology_ref_history_lock(port->mgr);
1794 ret = kref_get_unless_zero(&port->topology_kref);
1795 if (ret) {
1796 DRM_DEBUG("port %p (%d)\n",
1797 port, kref_read(&port->topology_kref));
1798 save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
1799 }
1800
1801 topology_ref_history_unlock(port->mgr);
1802 return ret;
1803}
1804
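/**
 * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
 * @port: The &struct drm_dp_mst_port to increment the topology refcount of
 *
 * Increments &drm_dp_mst_port.topology_kref without checking whether it has
 * already reached 0. Only valid when the caller is guaranteed to already
 * hold at least one topology reference to @port; otherwise
 * drm_dp_mst_topology_try_get_port() must be used.
 */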
1818static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
1819{
1820 topology_ref_history_lock(port->mgr);
1821
1822 WARN_ON(kref_read(&port->topology_kref) == 0);
1823 kref_get(&port->topology_kref);
1824 DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
1825 save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
1826
1827 topology_ref_history_unlock(port->mgr);
1828}
1829
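/**
 * drm_dp_mst_topology_put_port() - release a topology reference to a port
 * @port: The &struct drm_dp_mst_port to release the topology reference of
 *
 * Releases a topology reference from @port by decrementing
 * &drm_dp_mst_port.topology_kref. When the refcount reaches 0,
 * drm_dp_destroy_port() is called.
 */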
1841static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
1842{
1843 topology_ref_history_lock(port->mgr);
1844
1845 DRM_DEBUG("port %p (%d)\n",
1846 port, kref_read(&port->topology_kref) - 1);
1847 save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT);
1848
1849 topology_ref_history_unlock(port->mgr);
1850 kref_put(&port->topology_kref, drm_dp_destroy_port);
1851}
1852
1853static struct drm_dp_mst_branch *
1854drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
1855 struct drm_dp_mst_branch *to_find)
1856{
1857 struct drm_dp_mst_port *port;
1858 struct drm_dp_mst_branch *rmstb;
1859
1860 if (to_find == mstb)
1861 return mstb;
1862
1863 list_for_each_entry(port, &mstb->ports, next) {
1864 if (port->mstb) {
1865 rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
1866 port->mstb, to_find);
1867 if (rmstb)
1868 return rmstb;
1869 }
1870 }
1871 return NULL;
1872}
1873
1874static struct drm_dp_mst_branch *
1875drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
1876 struct drm_dp_mst_branch *mstb)
1877{
1878 struct drm_dp_mst_branch *rmstb = NULL;
1879
1880 mutex_lock(&mgr->lock);
1881 if (mgr->mst_primary) {
1882 rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
1883 mgr->mst_primary, mstb);
1884
1885 if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
1886 rmstb = NULL;
1887 }
1888 mutex_unlock(&mgr->lock);
1889 return rmstb;
1890}
1891
1892static struct drm_dp_mst_port *
1893drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
1894 struct drm_dp_mst_port *to_find)
1895{
1896 struct drm_dp_mst_port *port, *mport;
1897
1898 list_for_each_entry(port, &mstb->ports, next) {
1899 if (port == to_find)
1900 return port;
1901
1902 if (port->mstb) {
1903 mport = drm_dp_mst_topology_get_port_validated_locked(
1904 port->mstb, to_find);
1905 if (mport)
1906 return mport;
1907 }
1908 }
1909 return NULL;
1910}
1911
1912static struct drm_dp_mst_port *
1913drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
1914 struct drm_dp_mst_port *port)
1915{
1916 struct drm_dp_mst_port *rport = NULL;
1917
1918 mutex_lock(&mgr->lock);
1919 if (mgr->mst_primary) {
1920 rport = drm_dp_mst_topology_get_port_validated_locked(
1921 mgr->mst_primary, port);
1922
1923 if (rport && !drm_dp_mst_topology_try_get_port(rport))
1924 rport = NULL;
1925 }
1926 mutex_unlock(&mgr->lock);
1927 return rport;
1928}
1929
1930static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
1931{
1932 struct drm_dp_mst_port *port;
1933 int ret;
1934
1935 list_for_each_entry(port, &mstb->ports, next) {
1936 if (port->port_num == port_num) {
1937 ret = drm_dp_mst_topology_try_get_port(port);
1938 return ret ? port : NULL;
1939 }
1940 }
1941
1942 return NULL;
1943}
1944
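/*
 * Calculate the Relative Address (RAD) of a new branch device connected to
 * @port, based on the parent branch device's LCT and RAD. Returns the new
 * link count total (LCT) for the child branch.
 */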
1950static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
1951 u8 *rad)
1952{
1953 int parent_lct = port->parent->lct;
1954 int shift = 4;
1955 int idx = (parent_lct - 1) / 2;
1956
1957 if (parent_lct > 1) {
1958 memcpy(rad, port->parent->rad, idx + 1);
1959 shift = (parent_lct % 2) ? 4 : 0;
1960 } else
1961 rad[0] = 0;
1962
1963 rad[idx] |= port->port_num << shift;
1964 return parent_lct + 1;
1965}
1966
1967static bool drm_dp_mst_is_end_device(u8 pdt, bool mcs)
1968{
1969 switch (pdt) {
1970 case DP_PEER_DEVICE_DP_LEGACY_CONV:
1971 case DP_PEER_DEVICE_SST_SINK:
1972 return true;
1973 case DP_PEER_DEVICE_MST_BRANCHING:
1974
1975 if (!mcs)
1976 return true;
1977
1978 return false;
1979 }
1980 return true;
1981}
1982
1983static int
1984drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
1985 bool new_mcs)
1986{
1987 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
1988 struct drm_dp_mst_branch *mstb;
1989 u8 rad[8], lct;
1990 int ret = 0;
1991
1992 if (port->pdt == new_pdt && port->mcs == new_mcs)
1993 return 0;
1994
1995
1996 if (port->pdt != DP_PEER_DEVICE_NONE) {
1997 if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
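			/*
			 * If the new PDT would also have an i2c bus,
			 * don't bother with the teardown; it's not
			 * necessary
			 */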
2002 if (new_pdt != DP_PEER_DEVICE_NONE &&
2003 drm_dp_mst_is_end_device(new_pdt, new_mcs)) {
2004 port->pdt = new_pdt;
2005 port->mcs = new_mcs;
2006 return 0;
2007 }
2008
2009
2010 drm_dp_mst_unregister_i2c_bus(port);
2011 } else {
2012 mutex_lock(&mgr->lock);
2013 drm_dp_mst_topology_put_mstb(port->mstb);
2014 port->mstb = NULL;
2015 mutex_unlock(&mgr->lock);
2016 }
2017 }
2018
2019 port->pdt = new_pdt;
2020 port->mcs = new_mcs;
2021
2022 if (port->pdt != DP_PEER_DEVICE_NONE) {
2023 if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
2024
2025 ret = drm_dp_mst_register_i2c_bus(port);
2026 } else {
2027 lct = drm_dp_calculate_rad(port, rad);
2028 mstb = drm_dp_add_mst_branch_device(lct, rad);
2029 if (!mstb) {
2030 ret = -ENOMEM;
2031 DRM_ERROR("Failed to create MSTB for port %p",
2032 port);
2033 goto out;
2034 }
2035
2036 mutex_lock(&mgr->lock);
2037 port->mstb = mstb;
2038 mstb->mgr = port->mgr;
2039 mstb->port_parent = port;
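			/*
			 * Make sure this port's memory allocation stays
			 * around until its child MSTB releases it
			 */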
2045 drm_dp_mst_get_port_malloc(port);
2046 mutex_unlock(&mgr->lock);
2047
2048
2049 ret = 1;
2050 }
2051 }
2052
2053out:
2054 if (ret < 0)
2055 port->pdt = DP_PEER_DEVICE_NONE;
2056 return ret;
2057}
2058
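/**
 * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
 * @aux: Fake sideband AUX CH
 * @offset: address of the (first) register to read
 * @buffer: buffer to store the register values
 * @size: number of bytes in @buffer
 *
 * Performs the same functionality for remote devices via sideband messaging
 * as drm_dp_dpcd_read() does for local devices via actual AUX CH.
 *
 * Return: Number of bytes read, or negative error code on failure.
 */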
2072ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
2073 unsigned int offset, void *buffer, size_t size)
2074{
2075 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
2076 aux);
2077
2078 return drm_dp_send_dpcd_read(port->mgr, port,
2079 offset, size, buffer);
2080}
2081
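/**
 * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband
 * @aux: Fake sideband AUX CH
 * @offset: address of the (first) register to write
 * @buffer: buffer containing the values to write
 * @size: number of bytes in @buffer
 *
 * Performs the same functionality for remote devices via sideband messaging
 * as drm_dp_dpcd_write() does for local devices via actual AUX CH.
 *
 * Return: number of bytes written on success, or negative error code on
 * failure.
 */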
2095ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
2096 unsigned int offset, void *buffer, size_t size)
2097{
2098 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
2099 aux);
2100
2101 return drm_dp_send_dpcd_write(port->mgr, port,
2102 offset, size, buffer);
2103}
2104
2105static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
2106{
2107 int ret = 0;
2108
2109 memcpy(mstb->guid, guid, 16);
2110
2111 if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
2112 if (mstb->port_parent) {
2113 ret = drm_dp_send_dpcd_write(mstb->mgr,
2114 mstb->port_parent,
2115 DP_GUID, 16, mstb->guid);
2116 } else {
2117 ret = drm_dp_dpcd_write(mstb->mgr->aux,
2118 DP_GUID, mstb->guid, 16);
2119 }
2120 }
2121
2122 if (ret < 16 && ret > 0)
2123 return -EPROTO;
2124
2125 return ret == 16 ? 0 : ret;
2126}
2127
2128static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
2129 int pnum,
2130 char *proppath,
2131 size_t proppath_size)
2132{
2133 int i;
2134 char temp[8];
2135
2136 snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
2137 for (i = 0; i < (mstb->lct - 1); i++) {
2138 int shift = (i % 2) ? 0 : 4;
2139 int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
2140
2141 snprintf(temp, sizeof(temp), "-%d", port_num);
2142 strlcat(proppath, temp, proppath_size);
2143 }
2144 snprintf(temp, sizeof(temp), "-%d", pnum);
2145 strlcat(proppath, temp, proppath_size);
2146}
2147
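/**
 * drm_dp_mst_connector_late_register() - Late MST connector registration
 * @connector: The MST connector
 * @port: The MST port for this connector
 *
 * Helper to register the remote aux device for this MST port. Drivers should
 * call this from their mst connector's late_register hook to enable MST aux
 * devices.
 *
 * Return: 0 on success, negative error code on failure.
 */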
2159int drm_dp_mst_connector_late_register(struct drm_connector *connector,
2160 struct drm_dp_mst_port *port)
2161{
2162 DRM_DEBUG_KMS("registering %s remote bus for %s\n",
2163 port->aux.name, connector->kdev->kobj.name);
2164
2165 port->aux.dev = connector->kdev;
2166 return drm_dp_aux_register_devnode(&port->aux);
2167}
2168EXPORT_SYMBOL(drm_dp_mst_connector_late_register);
2169
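/**
 * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
 * @connector: The MST connector
 * @port: The MST port for this connector
 *
 * Helper to unregister the remote aux device for this MST port, registered by
 * drm_dp_mst_connector_late_register(). Drivers should call this from their
 * mst connector's early_unregister hook.
 */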
2179void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
2180 struct drm_dp_mst_port *port)
2181{
2182 DRM_DEBUG_KMS("unregistering %s remote bus for %s\n",
2183 port->aux.name, connector->kdev->kobj.name);
2184 drm_dp_aux_unregister_devnode(&port->aux);
2185}
2186EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
2187
2188static void
2189drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
2190 struct drm_dp_mst_port *port)
2191{
2192 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
2193 char proppath[255];
2194 int ret;
2195
2196 build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
2197 port->connector = mgr->cbs->add_connector(mgr, port, proppath);
2198 if (!port->connector) {
2199 ret = -ENOMEM;
2200 goto error;
2201 }
2202
2203 if (port->pdt != DP_PEER_DEVICE_NONE &&
2204 drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
2205 port->cached_edid = drm_get_edid(port->connector,
2206 &port->aux.ddc);
2207 drm_connector_set_tile_property(port->connector);
2208 }
2209
2210 drm_connector_register(port->connector);
2211 return;
2212
2213error:
2214 DRM_ERROR("Failed to create connector for port %p: %d\n", port, ret);
2215}
2216
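/*
 * Drop a topology reference, and unlink the port from the in-memory topology
 * layout
 */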
2221static void
2222drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,
2223 struct drm_dp_mst_port *port)
2224{
2225 mutex_lock(&mgr->lock);
2226 port->parent->num_ports--;
2227 list_del(&port->next);
2228 mutex_unlock(&mgr->lock);
2229 drm_dp_mst_topology_put_port(port);
2230}
2231
2232static struct drm_dp_mst_port *
2233drm_dp_mst_add_port(struct drm_device *dev,
2234 struct drm_dp_mst_topology_mgr *mgr,
2235 struct drm_dp_mst_branch *mstb, u8 port_number)
2236{
2237 struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL);
2238
2239 if (!port)
2240 return NULL;
2241
2242 kref_init(&port->topology_kref);
2243 kref_init(&port->malloc_kref);
2244 port->parent = mstb;
2245 port->port_num = port_number;
2246 port->mgr = mgr;
2247 port->aux.name = "DPMST";
2248 port->aux.dev = dev->dev;
2249 port->aux.is_remote = true;
2250
2251
2252 drm_dp_remote_aux_init(&port->aux);
2253
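	/*
	 * Make sure the memory allocation for our parent branch stays
	 * around until our own memory allocation is released
	 */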
2258 drm_dp_mst_get_mstb_malloc(mstb);
2259
2260 return port;
2261}
2262
2263static int
2264drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
2265 struct drm_device *dev,
2266 struct drm_dp_link_addr_reply_port *port_msg)
2267{
2268 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
2269 struct drm_dp_mst_port *port;
2270 int old_ddps = 0, ret;
2271 u8 new_pdt = DP_PEER_DEVICE_NONE;
2272 bool new_mcs = 0;
2273 bool created = false, send_link_addr = false, changed = false;
2274
2275 port = drm_dp_get_port(mstb, port_msg->port_number);
2276 if (!port) {
2277 port = drm_dp_mst_add_port(dev, mgr, mstb,
2278 port_msg->port_number);
2279 if (!port)
2280 return -ENOMEM;
2281 created = true;
2282 changed = true;
2283 } else if (!port->input && port_msg->input_port && port->connector) {
2284
2285
2286
2287 drm_dp_mst_topology_unlink_port(mgr, port);
2288 drm_dp_mst_topology_put_port(port);
2289 port = drm_dp_mst_add_port(dev, mgr, mstb,
2290 port_msg->port_number);
2291 if (!port)
2292 return -ENOMEM;
2293 changed = true;
2294 created = true;
2295 } else if (port->input && !port_msg->input_port) {
2296 changed = true;
2297 } else if (port->connector) {
		/*
		 * The port is already exposed to userspace as a connector,
		 * so any state changes need to happen under the topology
		 * state lock.
		 */
2301 drm_modeset_lock(&mgr->base.lock, NULL);
2302
2303 old_ddps = port->ddps;
2304 changed = port->ddps != port_msg->ddps ||
2305 (port->ddps &&
2306 (port->ldps != port_msg->legacy_device_plug_status ||
2307 port->dpcd_rev != port_msg->dpcd_revision ||
2308 port->mcs != port_msg->mcs ||
2309 port->pdt != port_msg->peer_device_type ||
2310 port->num_sdp_stream_sinks !=
2311 port_msg->num_sdp_stream_sinks));
2312 }
2313
2314 port->input = port_msg->input_port;
2315 if (!port->input)
2316 new_pdt = port_msg->peer_device_type;
2317 new_mcs = port_msg->mcs;
2318 port->ddps = port_msg->ddps;
2319 port->ldps = port_msg->legacy_device_plug_status;
2320 port->dpcd_rev = port_msg->dpcd_revision;
2321 port->num_sdp_streams = port_msg->num_sdp_streams;
2322 port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
2323
2324
2325
2326 if (created) {
2327 mutex_lock(&mgr->lock);
2328 drm_dp_mst_topology_get_port(port);
2329 list_add(&port->next, &mstb->ports);
2330 mstb->num_ports++;
2331 mutex_unlock(&mgr->lock);
2332 }
2333
2334
2335
2336
2337
2338 if (old_ddps != port->ddps || !created) {
2339 if (port->ddps && !port->input) {
2340 ret = drm_dp_send_enum_path_resources(mgr, mstb,
2341 port);
2342 if (ret == 1)
2343 changed = true;
2344 } else {
2345 port->full_pbn = 0;
2346 }
2347 }
2348
2349 ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
2350 if (ret == 1) {
2351 send_link_addr = true;
2352 } else if (ret < 0) {
2353 DRM_ERROR("Failed to change PDT on port %p: %d\n",
2354 port, ret);
2355 goto fail;
2356 }
2357
	/*
	 * If this port wasn't just created, then we're reprobing because
	 * we're coming out of suspend. In this case, always resend the link
	 * address if there's an MSTB on this port.
	 */
2363 if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
2364 port->mcs)
2365 send_link_addr = true;
2366
2367 if (port->connector)
2368 drm_modeset_unlock(&mgr->base.lock);
2369 else if (!port->input)
2370 drm_dp_mst_port_add_connector(mstb, port);
2371
2372 if (send_link_addr && port->mstb) {
2373 ret = drm_dp_send_link_address(mgr, port->mstb);
2374 if (ret == 1)
2375 changed = true;
2376 else if (ret < 0)
2377 goto fail_put;
2378 }
2379
2380
2381 drm_dp_mst_topology_put_port(port);
2382 return changed;
2383
2384fail:
2385 drm_dp_mst_topology_unlink_port(mgr, port);
2386 if (port->connector)
2387 drm_modeset_unlock(&mgr->base.lock);
2388fail_put:
2389 drm_dp_mst_topology_put_port(port);
2390 return ret;
2391}
2392
2393static void
2394drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
2395 struct drm_dp_connection_status_notify *conn_stat)
2396{
2397 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
2398 struct drm_dp_mst_port *port;
2399 int old_ddps, old_input, ret, i;
2400 u8 new_pdt;
2401 bool new_mcs;
2402 bool dowork = false, create_connector = false;
2403
2404 port = drm_dp_get_port(mstb, conn_stat->port_number);
2405 if (!port)
2406 return;
2407
2408 if (port->connector) {
2409 if (!port->input && conn_stat->input_port) {
			/*
			 * The port just changed into an input, but its
			 * connector can't be removed here. Unlink the whole
			 * port and reprobe the parent MSTB's link address
			 * instead.
			 */
2415 drm_dp_mst_topology_unlink_port(mgr, port);
2416 mstb->link_address_sent = false;
2417 dowork = true;
2418 goto out;
2419 }
2420
2421
2422 drm_modeset_lock(&mgr->base.lock, NULL);
2423 } else if (port->input && !conn_stat->input_port) {
2424 create_connector = true;
2425
2426 mstb->link_address_sent = false;
2427 dowork = true;
2428 }
2429
2430 old_ddps = port->ddps;
2431 old_input = port->input;
2432 port->input = conn_stat->input_port;
2433 port->ldps = conn_stat->legacy_device_plug_status;
2434 port->ddps = conn_stat->displayport_device_plug_status;
2435
2436 if (old_ddps != port->ddps) {
2437 if (port->ddps && !port->input)
2438 drm_dp_send_enum_path_resources(mgr, mstb, port);
2439 else
2440 port->full_pbn = 0;
2441 }
2442
2443 new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
2444 new_mcs = conn_stat->message_capability_status;
2445 ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
2446 if (ret == 1) {
2447 dowork = true;
2448 } else if (ret < 0) {
2449 DRM_ERROR("Failed to change PDT for port %p: %d\n",
2450 port, ret);
2451 dowork = false;
2452 }
2453
2454 if (!old_input && old_ddps != port->ddps && !port->ddps) {
2455 for (i = 0; i < mgr->max_payloads; i++) {
2456 struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
2457 struct drm_dp_mst_port *port_validated;
2458
2459 if (!vcpi)
2460 continue;
2461
2462 port_validated =
2463 container_of(vcpi, struct drm_dp_mst_port, vcpi);
2464 port_validated =
2465 drm_dp_mst_topology_get_port_validated(mgr, port_validated);
2466 if (!port_validated) {
2467 mutex_lock(&mgr->payload_lock);
2468 vcpi->num_slots = 0;
2469 mutex_unlock(&mgr->payload_lock);
2470 } else {
2471 drm_dp_mst_topology_put_port(port_validated);
2472 }
2473 }
2474 }
2475
2476 if (port->connector)
2477 drm_modeset_unlock(&mgr->base.lock);
2478 else if (create_connector)
2479 drm_dp_mst_port_add_connector(mstb, port);
2480
2481out:
2482 drm_dp_mst_topology_put_port(port);
2483 if (dowork)
2484 queue_work(system_long_wq, &mstb->mgr->work);
2485}
2486
2487static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
2488 u8 lct, u8 *rad)
2489{
2490 struct drm_dp_mst_branch *mstb;
2491 struct drm_dp_mst_port *port;
2492 int i, ret;
2493
2494
2495 mutex_lock(&mgr->lock);
2496 mstb = mgr->mst_primary;
2497
2498 if (!mstb)
2499 goto out;
2500
2501 for (i = 0; i < lct - 1; i++) {
2502 int shift = (i % 2) ? 0 : 4;
2503 int port_num = (rad[i / 2] >> shift) & 0xf;
2504
2505 list_for_each_entry(port, &mstb->ports, next) {
2506 if (port->port_num == port_num) {
2507 mstb = port->mstb;
2508 if (!mstb) {
2509 DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
2510 goto out;
2511 }
2512
2513 break;
2514 }
2515 }
2516 }
2517 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2518 if (!ret)
2519 mstb = NULL;
2520out:
2521 mutex_unlock(&mgr->lock);
2522 return mstb;
2523}
2524
2525static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
2526 struct drm_dp_mst_branch *mstb,
2527 const uint8_t *guid)
2528{
2529 struct drm_dp_mst_branch *found_mstb;
2530 struct drm_dp_mst_port *port;
2531
	/* The topology may have been torn down, leaving no primary branch */
	if (!mstb)
		return NULL;

	if (memcmp(mstb->guid, guid, 16) == 0)
		return mstb;
2534
2535
2536 list_for_each_entry(port, &mstb->ports, next) {
2537 if (!port->mstb)
2538 continue;
2539
2540 found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
2541
2542 if (found_mstb)
2543 return found_mstb;
2544 }
2545
2546 return NULL;
2547}
2548
2549static struct drm_dp_mst_branch *
2550drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
2551 const uint8_t *guid)
2552{
2553 struct drm_dp_mst_branch *mstb;
2554 int ret;
2555
2556
2557 mutex_lock(&mgr->lock);
2558
2559 mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
2560 if (mstb) {
2561 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2562 if (!ret)
2563 mstb = NULL;
2564 }
2565
2566 mutex_unlock(&mgr->lock);
2567 return mstb;
2568}
2569
2570static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2571 struct drm_dp_mst_branch *mstb)
2572{
2573 struct drm_dp_mst_port *port;
2574 int ret;
2575 bool changed = false;
2576
2577 if (!mstb->link_address_sent) {
2578 ret = drm_dp_send_link_address(mgr, mstb);
2579 if (ret == 1)
2580 changed = true;
2581 else if (ret < 0)
2582 return ret;
2583 }
2584
2585 list_for_each_entry(port, &mstb->ports, next) {
2586 struct drm_dp_mst_branch *mstb_child = NULL;
2587
2588 if (port->input || !port->ddps)
2589 continue;
2590
2591 if (port->mstb)
2592 mstb_child = drm_dp_mst_topology_get_mstb_validated(
2593 mgr, port->mstb);
2594
2595 if (mstb_child) {
2596 ret = drm_dp_check_and_send_link_address(mgr,
2597 mstb_child);
2598 drm_dp_mst_topology_put_mstb(mstb_child);
2599 if (ret == 1)
2600 changed = true;
2601 else if (ret < 0)
2602 return ret;
2603 }
2604 }
2605
2606 return changed;
2607}
2608
2609static void drm_dp_mst_link_probe_work(struct work_struct *work)
2610{
2611 struct drm_dp_mst_topology_mgr *mgr =
2612 container_of(work, struct drm_dp_mst_topology_mgr, work);
2613 struct drm_device *dev = mgr->dev;
2614 struct drm_dp_mst_branch *mstb;
2615 int ret;
2616 bool clear_payload_id_table;
2617
2618 mutex_lock(&mgr->probe_lock);
2619
2620 mutex_lock(&mgr->lock);
2621 clear_payload_id_table = !mgr->payload_id_table_cleared;
2622 mgr->payload_id_table_cleared = true;
2623
2624 mstb = mgr->mst_primary;
2625 if (mstb) {
2626 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2627 if (!ret)
2628 mstb = NULL;
2629 }
2630 mutex_unlock(&mgr->lock);
2631 if (!mstb) {
2632 mutex_unlock(&mgr->probe_lock);
2633 return;
2634 }
2635
	/*
	 * Some branch devices appear to report bogus resource availability
	 * on their downstream ports until the payload ID table has been
	 * cleared, even after the DP_PAYLOAD_ALLOCATE_* registers were reset
	 * in drm_dp_mst_topology_mgr_set_mst(). Send CLEAR_PAYLOAD_ID_TABLE
	 * once per connection as a workaround.
	 */
2644 if (clear_payload_id_table) {
2645 DRM_DEBUG_KMS("Clearing payload ID table\n");
2646 drm_dp_send_clear_payload_id_table(mgr, mstb);
2647 }
2648
2649 ret = drm_dp_check_and_send_link_address(mgr, mstb);
2650 drm_dp_mst_topology_put_mstb(mstb);
2651
2652 mutex_unlock(&mgr->probe_lock);
2653 if (ret)
2654 drm_kms_helper_hotplug_event(dev);
2655}
2656
2657static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
2658 u8 *guid)
2659{
2660 u64 salt;
2661
2662 if (memchr_inv(guid, 0, 16))
2663 return true;
2664
2665 salt = get_jiffies_64();
2666
2667 memcpy(&guid[0], &salt, sizeof(u64));
2668 memcpy(&guid[8], &salt, sizeof(u64));
2669
2670 return false;
2671}
2672
2673static void build_dpcd_read(struct drm_dp_sideband_msg_tx *msg,
2674 u8 port_num, u32 offset, u8 num_bytes)
2675{
2676 struct drm_dp_sideband_msg_req_body req;
2677
2678 req.req_type = DP_REMOTE_DPCD_READ;
2679 req.u.dpcd_read.port_number = port_num;
2680 req.u.dpcd_read.dpcd_address = offset;
2681 req.u.dpcd_read.num_bytes = num_bytes;
2682 drm_dp_encode_sideband_req(&req, msg);
2683}
2684
2685static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
2686 bool up, u8 *msg, int len)
2687{
2688 int ret;
2689 int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
2690 int tosend, total, offset;
2691 int retries = 0;
2692
2693retry:
2694 total = len;
2695 offset = 0;
2696 do {
2697 tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
2698
2699 ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
2700 &msg[offset],
2701 tosend);
2702 if (ret != tosend) {
2703 if (ret == -EIO && retries < 5) {
2704 retries++;
2705 goto retry;
2706 }
2707 DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
2708
2709 return -EIO;
2710 }
2711 offset += tosend;
2712 total -= tosend;
2713 } while (total > 0);
2714 return 0;
2715}
2716
2717static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
2718 struct drm_dp_sideband_msg_tx *txmsg)
2719{
2720 struct drm_dp_mst_branch *mstb = txmsg->dst;
2721 u8 req_type;
2722
2723 req_type = txmsg->msg[0] & 0x7f;
2724 if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
2725 req_type == DP_RESOURCE_STATUS_NOTIFY)
2726 hdr->broadcast = 1;
2727 else
2728 hdr->broadcast = 0;
2729 hdr->path_msg = txmsg->path_msg;
2730 hdr->lct = mstb->lct;
2731 hdr->lcr = mstb->lct - 1;
2732 if (mstb->lct > 1)
2733 memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
2734
2735 return 0;
2736}
2737
2738
2739
2740static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
2741 struct drm_dp_sideband_msg_tx *txmsg,
2742 bool up)
2743{
2744 u8 chunk[48];
2745 struct drm_dp_sideband_msg_hdr hdr;
2746 int len, space, idx, tosend;
2747 int ret;
2748
2749 if (txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
2750 return 0;
2751
2752 memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
2753
2754 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED)
2755 txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
2756
2757
2758 ret = set_hdr_from_dst_qlock(&hdr, txmsg);
2759 if (ret < 0)
2760 return ret;
2761
2762
2763 len = txmsg->cur_len - txmsg->cur_offset;
2764
2765
2766 space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
2767
2768 tosend = min(len, space);
2769 if (len == txmsg->cur_len)
2770 hdr.somt = 1;
2771 if (space >= len)
2772 hdr.eomt = 1;
2773
2774
2775 hdr.msg_len = tosend + 1;
2776 drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
2777 memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
2778
2779 drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
2780 idx += tosend + 1;
2781
2782 ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
2783 if (unlikely(ret) && drm_debug_enabled(DRM_UT_DP)) {
2784 struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2785
2786 drm_printf(&p, "sideband msg failed to send\n");
2787 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2788 return ret;
2789 }
2790
2791 txmsg->cur_offset += tosend;
2792 if (txmsg->cur_offset == txmsg->cur_len) {
2793 txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
2794 return 1;
2795 }
2796 return 0;
2797}
2798
2799static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
2800{
2801 struct drm_dp_sideband_msg_tx *txmsg;
2802 int ret;
2803
2804 WARN_ON(!mutex_is_locked(&mgr->qlock));
2805
2806
2807 if (list_empty(&mgr->tx_msg_downq))
2808 return;
2809
2810 txmsg = list_first_entry(&mgr->tx_msg_downq,
2811 struct drm_dp_sideband_msg_tx, next);
2812 ret = process_single_tx_qlock(mgr, txmsg, false);
2813 if (ret < 0) {
2814 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
2815 list_del(&txmsg->next);
2816 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
2817 wake_up_all(&mgr->tx_waitq);
2818 }
2819}
2820
2821static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
2822 struct drm_dp_sideband_msg_tx *txmsg)
2823{
2824 mutex_lock(&mgr->qlock);
2825 list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
2826
2827 if (drm_debug_enabled(DRM_UT_DP)) {
2828 struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2829
2830 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2831 }
2832
2833 if (list_is_singular(&mgr->tx_msg_downq))
2834 process_single_down_tx_qlock(mgr);
2835 mutex_unlock(&mgr->qlock);
2836}
2837
2838static void
2839drm_dp_dump_link_address(struct drm_dp_link_address_ack_reply *reply)
2840{
2841 struct drm_dp_link_addr_reply_port *port_reply;
2842 int i;
2843
2844 for (i = 0; i < reply->nports; i++) {
2845 port_reply = &reply->ports[i];
2846 DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n",
2847 i,
2848 port_reply->input_port,
2849 port_reply->peer_device_type,
2850 port_reply->port_number,
2851 port_reply->dpcd_revision,
2852 port_reply->mcs,
2853 port_reply->ddps,
2854 port_reply->legacy_device_plug_status,
2855 port_reply->num_sdp_streams,
2856 port_reply->num_sdp_stream_sinks);
2857 }
2858}
2859
2860static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2861 struct drm_dp_mst_branch *mstb)
2862{
2863 struct drm_dp_sideband_msg_tx *txmsg;
2864 struct drm_dp_link_address_ack_reply *reply;
2865 struct drm_dp_mst_port *port, *tmp;
2866 int i, ret, port_mask = 0;
2867 bool changed = false;
2868
2869 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2870 if (!txmsg)
2871 return -ENOMEM;
2872
2873 txmsg->dst = mstb;
2874 build_link_address(txmsg);
2875
2876 mstb->link_address_sent = true;
2877 drm_dp_queue_down_tx(mgr, txmsg);
2878
2879
2880 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2881 if (ret <= 0) {
2882 DRM_ERROR("Sending link address failed with %d\n", ret);
2883 goto out;
2884 }
2885 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2886 DRM_ERROR("link address NAK received\n");
2887 ret = -EIO;
2888 goto out;
2889 }
2890
2891 reply = &txmsg->reply.u.link_addr;
2892 DRM_DEBUG_KMS("link address reply: %d\n", reply->nports);
2893 drm_dp_dump_link_address(reply);
2894
2895 ret = drm_dp_check_mstb_guid(mstb, reply->guid);
2896 if (ret) {
2897 char buf[64];
2898
2899 drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, buf, sizeof(buf));
2900 DRM_ERROR("GUID check on %s failed: %d\n",
2901 buf, ret);
2902 goto out;
2903 }
2904
2905 for (i = 0; i < reply->nports; i++) {
2906 port_mask |= BIT(reply->ports[i].port_number);
2907 ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev,
2908 &reply->ports[i]);
2909 if (ret == 1)
2910 changed = true;
2911 else if (ret < 0)
2912 goto out;
2913 }
2914
	/*
	 * Prune any ports that are currently a part of mstb in our in-memory
	 * topology, but were not seen in this link address. Usually this
	 * means that they were removed while the topology was out of sync,
	 * e.g. during suspend/resume.
	 */
2920 mutex_lock(&mgr->lock);
2921 list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
2922 if (port_mask & BIT(port->port_num))
2923 continue;
2924
2925 DRM_DEBUG_KMS("port %d was not in link address, removing\n",
2926 port->port_num);
2927 list_del(&port->next);
2928 drm_dp_mst_topology_put_port(port);
2929 changed = true;
2930 }
2931 mutex_unlock(&mgr->lock);
2932
2933out:
2934 if (ret <= 0)
2935 mstb->link_address_sent = false;
2936 kfree(txmsg);
2937 return ret < 0 ? ret : changed;
2938}
2939
2940static void
2941drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
2942 struct drm_dp_mst_branch *mstb)
2943{
2944 struct drm_dp_sideband_msg_tx *txmsg;
2945 int ret;
2946
2947 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2948 if (!txmsg)
2949 return;
2950
2951 txmsg->dst = mstb;
2952 build_clear_payload_id_table(txmsg);
2953
2954 drm_dp_queue_down_tx(mgr, txmsg);
2955
2956 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2957 if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
2958 DRM_DEBUG_KMS("clear payload table id nak received\n");
2959
2960 kfree(txmsg);
2961}
2962
2963static int
2964drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
2965 struct drm_dp_mst_branch *mstb,
2966 struct drm_dp_mst_port *port)
2967{
2968 struct drm_dp_enum_path_resources_ack_reply *path_res;
2969 struct drm_dp_sideband_msg_tx *txmsg;
2970 int ret;
2971
2972 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2973 if (!txmsg)
2974 return -ENOMEM;
2975
2976 txmsg->dst = mstb;
2977 build_enum_path_resources(txmsg, port->port_num);
2978
2979 drm_dp_queue_down_tx(mgr, txmsg);
2980
2981 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2982 if (ret > 0) {
2983 ret = 0;
2984 path_res = &txmsg->reply.u.path_resources;
2985
2986 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2987 DRM_DEBUG_KMS("enum path resources nak received\n");
2988 } else {
2989 if (port->port_num != path_res->port_number)
2990 DRM_ERROR("got incorrect port in response\n");
2991
2992 DRM_DEBUG_KMS("enum path resources %d: %d %d\n",
2993 path_res->port_number,
2994 path_res->full_payload_bw_number,
2995 path_res->avail_payload_bw_number);
2996
2997
2998
2999
3000
3001 if (port->full_pbn != path_res->full_payload_bw_number ||
3002 port->fec_capable != path_res->fec_capable)
3003 ret = 1;
3004
3005 port->full_pbn = path_res->full_payload_bw_number;
3006 port->fec_capable = path_res->fec_capable;
3007 }
3008 }
3009
3010 kfree(txmsg);
3011 return ret;
3012}
3013
3014static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
3015{
3016 if (!mstb->port_parent)
3017 return NULL;
3018
3019 if (mstb->port_parent->mstb != mstb)
3020 return mstb->port_parent;
3021
3022 return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
3023}
3024
3025
3026
3027
3028
3029
3030
3031
3032
3033static struct drm_dp_mst_branch *
3034drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
3035 struct drm_dp_mst_branch *mstb,
3036 int *port_num)
3037{
3038 struct drm_dp_mst_branch *rmstb = NULL;
3039 struct drm_dp_mst_port *found_port;
3040
3041 mutex_lock(&mgr->lock);
3042 if (!mgr->mst_primary)
3043 goto out;
3044
3045 do {
3046 found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
3047 if (!found_port)
3048 break;
3049
3050 if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
3051 rmstb = found_port->parent;
3052 *port_num = found_port->port_num;
3053 } else {
3054
3055 mstb = found_port->parent;
3056 }
3057 } while (!rmstb);
3058out:
3059 mutex_unlock(&mgr->lock);
3060 return rmstb;
3061}
3062
3063static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
3064 struct drm_dp_mst_port *port,
3065 int id,
3066 int pbn)
3067{
3068 struct drm_dp_sideband_msg_tx *txmsg;
3069 struct drm_dp_mst_branch *mstb;
3070 int ret, port_num;
3071 u8 sinks[DRM_DP_MAX_SDP_STREAMS];
3072 int i;
3073
3074 port_num = port->port_num;
3075 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3076 if (!mstb) {
3077 mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
3078 port->parent,
3079 &port_num);
3080
3081 if (!mstb)
3082 return -EINVAL;
3083 }
3084
3085 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3086 if (!txmsg) {
3087 ret = -ENOMEM;
3088 goto fail_put;
3089 }
3090
3091 for (i = 0; i < port->num_sdp_streams; i++)
3092 sinks[i] = i;
3093
3094 txmsg->dst = mstb;
3095 build_allocate_payload(txmsg, port_num,
3096 id,
3097 pbn, port->num_sdp_streams, sinks);
3098
3099 drm_dp_queue_down_tx(mgr, txmsg);
3100
3101
3102
3103
3104
3105
3106
3107
3108
3109 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3110 if (ret > 0) {
3111 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3112 ret = -EINVAL;
3113 else
3114 ret = 0;
3115 }
3116 kfree(txmsg);
3117fail_put:
3118 drm_dp_mst_topology_put_mstb(mstb);
3119 return ret;
3120}
3121
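/**
 * drm_dp_send_power_updown_phy() - send a power up/down phy request
 * @mgr: MST topology manager to use
 * @port: the port to change the power state of
 * @power_up: true to power up, false to power down
 *
 * Queues a POWER_UP_PHY or POWER_DOWN_PHY sideband message for @port and
 * waits for the reply.
 *
 * Returns: 0 on success, negative error code on failure
 */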
3122int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
3123 struct drm_dp_mst_port *port, bool power_up)
3124{
3125 struct drm_dp_sideband_msg_tx *txmsg;
3126 int ret;
3127
3128 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3129 if (!port)
3130 return -EINVAL;
3131
3132 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3133 if (!txmsg) {
3134 drm_dp_mst_topology_put_port(port);
3135 return -ENOMEM;
3136 }
3137
3138 txmsg->dst = port->parent;
3139 build_power_updown_phy(txmsg, port->port_num, power_up);
3140 drm_dp_queue_down_tx(mgr, txmsg);
3141
3142 ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
3143 if (ret > 0) {
3144 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3145 ret = -EINVAL;
3146 else
3147 ret = 0;
3148 }
3149 kfree(txmsg);
3150 drm_dp_mst_topology_put_port(port);
3151
3152 return ret;
3153}
3154EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
3155
3156static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
3157 int id,
3158 struct drm_dp_payload *payload)
3159{
3160 int ret;
3161
3162 ret = drm_dp_dpcd_write_payload(mgr, id, payload);
3163 if (ret < 0) {
3164 payload->payload_state = 0;
3165 return ret;
3166 }
3167 payload->payload_state = DP_PAYLOAD_LOCAL;
3168 return 0;
3169}
3170
3171static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
3172 struct drm_dp_mst_port *port,
3173 int id,
3174 struct drm_dp_payload *payload)
3175{
3176 int ret;
3177
3178 ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
3179 if (ret < 0)
3180 return ret;
3181 payload->payload_state = DP_PAYLOAD_REMOTE;
3182 return ret;
3183}
3184
3185static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
3186 struct drm_dp_mst_port *port,
3187 int id,
3188 struct drm_dp_payload *payload)
3189{
3190 DRM_DEBUG_KMS("\n");
3191
	if (port)
		drm_dp_payload_send_msg(mgr, port, id, 0);
3195
3196 drm_dp_dpcd_write_payload(mgr, id, payload);
3197 payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
3198 return 0;
3199}
3200
3201static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
3202 int id,
3203 struct drm_dp_payload *payload)
3204{
3205 payload->payload_state = 0;
3206 return 0;
3207}
3221
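/**
 * drm_dp_update_payload_part1() - Execute payload update part 1
 * @mgr: manager to use.
 *
 * This iterates over all proposed virtual channels, and tries to
 * allocate space in the link for them. For 0->slots transitions,
 * this step just writes the VCPI to the MST device. For slots->0
 * transitions, this writes the updated VCPIs and removes the
 * remote VC payloads.
 *
 * After calling this the driver should generate ACT and payload
 * packets.
 */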
3222int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
3223{
3224 struct drm_dp_payload req_payload;
3225 struct drm_dp_mst_port *port;
3226 int i, j;
3227 int cur_slots = 1;
3228
3229 mutex_lock(&mgr->payload_lock);
3230 for (i = 0; i < mgr->max_payloads; i++) {
3231 struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
3232 struct drm_dp_payload *payload = &mgr->payloads[i];
3233 bool put_port = false;
3234
3235
3236
3237 req_payload.start_slot = cur_slots;
3238 if (vcpi) {
3239 port = container_of(vcpi, struct drm_dp_mst_port,
3240 vcpi);
3241
3242
3243
3244
3245 if (vcpi->num_slots) {
3246 port = drm_dp_mst_topology_get_port_validated(
3247 mgr, port);
3248 if (!port) {
3249 mutex_unlock(&mgr->payload_lock);
3250 return -EINVAL;
3251 }
3252 put_port = true;
3253 }
3254
3255 req_payload.num_slots = vcpi->num_slots;
3256 req_payload.vcpi = vcpi->vcpi;
3257 } else {
3258 port = NULL;
3259 req_payload.num_slots = 0;
3260 }
3261
3262 payload->start_slot = req_payload.start_slot;
3263
3264 if (payload->num_slots != req_payload.num_slots) {
3265
3266
3267 if (req_payload.num_slots) {
3268 drm_dp_create_payload_step1(mgr, vcpi->vcpi,
3269 &req_payload);
3270 payload->num_slots = req_payload.num_slots;
3271 payload->vcpi = req_payload.vcpi;
3272
3273 } else if (payload->num_slots) {
3274 payload->num_slots = 0;
3275 drm_dp_destroy_payload_step1(mgr, port,
3276 payload->vcpi,
3277 payload);
3278 req_payload.payload_state =
3279 payload->payload_state;
3280 payload->start_slot = 0;
3281 }
3282 payload->payload_state = req_payload.payload_state;
3283 }
3284 cur_slots += req_payload.num_slots;
3285
3286 if (put_port)
3287 drm_dp_mst_topology_put_port(port);
3288 }
3289
3290 for (i = 0; i < mgr->max_payloads; ) {
3291 if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) {
3292 i++;
3293 continue;
3294 }
3295
3296 DRM_DEBUG_KMS("removing payload %d\n", i);
3297 for (j = i; j < mgr->max_payloads - 1; j++) {
3298 mgr->payloads[j] = mgr->payloads[j + 1];
3299 mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
3300
3301 if (mgr->proposed_vcpis[j] &&
3302 mgr->proposed_vcpis[j]->num_slots) {
3303 set_bit(j + 1, &mgr->payload_mask);
3304 } else {
3305 clear_bit(j + 1, &mgr->payload_mask);
3306 }
3307 }
3308
3309 memset(&mgr->payloads[mgr->max_payloads - 1], 0,
3310 sizeof(struct drm_dp_payload));
3311 mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
3312 clear_bit(mgr->max_payloads, &mgr->payload_mask);
3313 }
3314 mutex_unlock(&mgr->payload_lock);
3315
3316 return 0;
3317}
3318EXPORT_SYMBOL(drm_dp_update_payload_part1);
3328
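/**
 * drm_dp_update_payload_part2() - Execute payload update part 2
 * @mgr: manager to use.
 *
 * This iterates over all proposed virtual channels, and tries to
 * allocate space in the link for them. For 0->slots transitions,
 * this step writes the remote VC payload commands. For slots->0
 * this just resets some internal state.
 */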
3329int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
3330{
3331 struct drm_dp_mst_port *port;
3332 int i;
3333 int ret = 0;
3334
3335 mutex_lock(&mgr->payload_lock);
3336 for (i = 0; i < mgr->max_payloads; i++) {
3337
3338 if (!mgr->proposed_vcpis[i])
3339 continue;
3340
3341 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
3342
3343 DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
3344 if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
3345 ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
3346 } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
3347 ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
3348 }
3349 if (ret) {
3350 mutex_unlock(&mgr->payload_lock);
3351 return ret;
3352 }
3353 }
3354 mutex_unlock(&mgr->payload_lock);
3355 return 0;
3356}
3357EXPORT_SYMBOL(drm_dp_update_payload_part2);
3358
3359static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
3360 struct drm_dp_mst_port *port,
3361 int offset, int size, u8 *bytes)
3362{
3363 int ret = 0;
3364 struct drm_dp_sideband_msg_tx *txmsg;
3365 struct drm_dp_mst_branch *mstb;
3366
3367 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3368 if (!mstb)
3369 return -EINVAL;
3370
3371 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3372 if (!txmsg) {
3373 ret = -ENOMEM;
3374 goto fail_put;
3375 }
3376
3377 build_dpcd_read(txmsg, port->port_num, offset, size);
3378 txmsg->dst = port->parent;
3379
3380 drm_dp_queue_down_tx(mgr, txmsg);
3381
3382 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3383 if (ret < 0)
3384 goto fail_free;
3385
3386
	if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3388 DRM_ERROR("mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",
3389 mstb, port->port_num, offset, size);
3390 ret = -EIO;
3391 goto fail_free;
3392 }
3393
3394 if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) {
3395 ret = -EPROTO;
3396 goto fail_free;
3397 }
3398
3399 ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes,
3400 size);
3401 memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret);
3402
3403fail_free:
3404 kfree(txmsg);
3405fail_put:
3406 drm_dp_mst_topology_put_mstb(mstb);
3407
3408 return ret;
3409}
3410
3411static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
3412 struct drm_dp_mst_port *port,
3413 int offset, int size, u8 *bytes)
3414{
3415 int ret;
3416 struct drm_dp_sideband_msg_tx *txmsg;
3417 struct drm_dp_mst_branch *mstb;
3418
3419 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3420 if (!mstb)
3421 return -EINVAL;
3422
3423 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3424 if (!txmsg) {
3425 ret = -ENOMEM;
3426 goto fail_put;
3427 }
3428
3429 build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
3430 txmsg->dst = mstb;
3431
3432 drm_dp_queue_down_tx(mgr, txmsg);
3433
3434 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3435 if (ret > 0) {
3436 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3437 ret = -EIO;
3438 else
3439 ret = size;
3440 }
3441
3442 kfree(txmsg);
3443fail_put:
3444 drm_dp_mst_topology_put_mstb(mstb);
3445 return ret;
3446}
3447
3448static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
3449{
3450 struct drm_dp_sideband_msg_reply_body reply;
3451
3452 reply.reply_type = DP_SIDEBAND_REPLY_ACK;
3453 reply.req_type = req_type;
3454 drm_dp_encode_sideband_reply(&reply, msg);
3455 return 0;
3456}
3457
3458static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
3459 struct drm_dp_mst_branch *mstb,
3460 int req_type, bool broadcast)
3461{
3462 struct drm_dp_sideband_msg_tx *txmsg;
3463
3464 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3465 if (!txmsg)
3466 return -ENOMEM;
3467
3468 txmsg->dst = mstb;
3469 drm_dp_encode_up_ack_reply(txmsg, req_type);
3470
3471 mutex_lock(&mgr->qlock);
3472
3473 process_single_tx_qlock(mgr, txmsg, true);
3474 mutex_unlock(&mgr->qlock);
3475
3476 kfree(txmsg);
3477 return 0;
3478}
3479
3480static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count)
3481{
3482 if (dp_link_bw == 0 || dp_link_count == 0)
3483 DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
3484 dp_link_bw, dp_link_count);
3485
3486 return dp_link_bw * dp_link_count / 2;
3487}
3496
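/**
 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
 * @mgr: manager to set state for
 * @mst_state: true to enable MST on this connector - false to disable.
 *
 * This is called by the driver when it detects an MST capable device plugged
 * into a DP MST capable port, or when a DP MST capable device is unplugged.
 */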
3497int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
3498{
3499 int ret = 0;
3500 struct drm_dp_mst_branch *mstb = NULL;
3501
3502 mutex_lock(&mgr->payload_lock);
3503 mutex_lock(&mgr->lock);
3504 if (mst_state == mgr->mst_state)
3505 goto out_unlock;
3506
3507 mgr->mst_state = mst_state;
3508
3509 if (mst_state) {
3510 struct drm_dp_payload reset_pay;
3511
3512 WARN_ON(mgr->mst_primary);
3513
3514
3515 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
3516 if (ret != DP_RECEIVER_CAP_SIZE) {
3517 DRM_DEBUG_KMS("failed to read DPCD\n");
3518 goto out_unlock;
3519 }
3520
3521 mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1],
3522 mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
3523 if (mgr->pbn_div == 0) {
3524 ret = -EINVAL;
3525 goto out_unlock;
3526 }
3527
3528
3529 mstb = drm_dp_add_mst_branch_device(1, NULL);
3530 if (mstb == NULL) {
3531 ret = -ENOMEM;
3532 goto out_unlock;
3533 }
3534 mstb->mgr = mgr;
3535
3536
3537 mgr->mst_primary = mstb;
3538 drm_dp_mst_topology_get_mstb(mgr->mst_primary);
3539
3540 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3541 DP_MST_EN |
3542 DP_UP_REQ_EN |
3543 DP_UPSTREAM_IS_SRC);
3544 if (ret < 0)
3545 goto out_unlock;
3546
3547 reset_pay.start_slot = 0;
3548 reset_pay.num_slots = 0x3f;
3549 drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
3550
3551 queue_work(system_long_wq, &mgr->work);
3552
3553 ret = 0;
3554 } else {
3555
3556 mstb = mgr->mst_primary;
3557 mgr->mst_primary = NULL;
3558
3559 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
3560 ret = 0;
3561 memset(mgr->payloads, 0,
3562 mgr->max_payloads * sizeof(mgr->payloads[0]));
3563 memset(mgr->proposed_vcpis, 0,
3564 mgr->max_payloads * sizeof(mgr->proposed_vcpis[0]));
3565 mgr->payload_mask = 0;
3566 set_bit(0, &mgr->payload_mask);
3567 mgr->vcpi_mask = 0;
3568 mgr->payload_id_table_cleared = false;
3569 }
3570
3571out_unlock:
3572 mutex_unlock(&mgr->lock);
3573 mutex_unlock(&mgr->payload_lock);
3574 if (mstb)
3575 drm_dp_mst_topology_put_mstb(mstb);
3576 return ret;
3577
3578}
3579EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
3580
3581static void
3582drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
3583{
3584 struct drm_dp_mst_port *port;
3585
3586
3587 mstb->link_address_sent = false;
3588
3589 list_for_each_entry(port, &mstb->ports, next)
3590 if (port->mstb)
3591 drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
3592}
3600
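/**
 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
 * @mgr: manager to suspend
 *
 * This function tells the MST device that we can't handle UP messages
 * anymore. This should stop it from sending any since we are suspended.
 */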
3601void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
3602{
3603 mutex_lock(&mgr->lock);
3604 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3605 DP_MST_EN | DP_UPSTREAM_IS_SRC);
3606 mutex_unlock(&mgr->lock);
3607 flush_work(&mgr->up_req_work);
3608 flush_work(&mgr->work);
3609 flush_work(&mgr->delayed_destroy_work);
3610
3611 mutex_lock(&mgr->lock);
3612 if (mgr->mst_state && mgr->mst_primary)
3613 drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
3614 mutex_unlock(&mgr->lock);
3615}
3616EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
3637
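/**
 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
 * @mgr: manager to resume
 * @sync: whether or not to perform topology reprobing synchronously
 *
 * This will fetch DPCD and see if the device is still there,
 * if it is, it will rewrite the MSTM control bits, and return.
 *
 * If the device fails this returns -1, and the driver should do
 * a full MST reprobe, in case we were undocked.
 *
 * During system resume (where it is assumed that the driver will be calling
 * drm_atomic_helper_resume()) this function should be called beforehand with
 * @sync set to true. In contexts like runtime resume where the driver is not
 * expected to be calling drm_atomic_helper_resume(), this function can be
 * called with @sync set to false to avoid deadlocking.
 *
 * Returns: -1 if the MST topology was removed while we were suspended, 0
 * otherwise.
 */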
3638int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
3639 bool sync)
3640{
3641 int ret;
3642 u8 guid[16];
3643
3644 mutex_lock(&mgr->lock);
3645 if (!mgr->mst_primary)
3646 goto out_fail;
3647
3648 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd,
3649 DP_RECEIVER_CAP_SIZE);
3650 if (ret != DP_RECEIVER_CAP_SIZE) {
3651 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
3652 goto out_fail;
3653 }
3654
3655 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3656 DP_MST_EN |
3657 DP_UP_REQ_EN |
3658 DP_UPSTREAM_IS_SRC);
3659 if (ret < 0) {
3660 DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
3661 goto out_fail;
3662 }
3663
3664
3665 ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
3666 if (ret != 16) {
3667 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
3668 goto out_fail;
3669 }
3670
3671 ret = drm_dp_check_mstb_guid(mgr->mst_primary, guid);
3672 if (ret) {
3673 DRM_DEBUG_KMS("check mstb failed - undocked during suspend?\n");
3674 goto out_fail;
3675 }
3676
3677
3678
3679
3680
3681
3682 queue_work(system_long_wq, &mgr->work);
3683 mutex_unlock(&mgr->lock);
3684
3685 if (sync) {
3686 DRM_DEBUG_KMS("Waiting for link probe work to finish re-syncing topology...\n");
3687 flush_work(&mgr->work);
3688 }
3689
3690 return 0;
3691
3692out_fail:
3693 mutex_unlock(&mgr->lock);
3694 return -1;
3695}
3696EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
3697
3698static bool
3699drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
3700 struct drm_dp_mst_branch **mstb)
3701{
3702 int len;
3703 u8 replyblock[32];
3704 int replylen, curreply;
3705 int ret;
3706 u8 hdrlen;
3707 struct drm_dp_sideband_msg_hdr hdr;
3708 struct drm_dp_sideband_msg_rx *msg =
3709 up ? &mgr->up_req_recv : &mgr->down_rep_recv;
3710 int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE :
3711 DP_SIDEBAND_MSG_DOWN_REP_BASE;
3712
3713 if (!up)
3714 *mstb = NULL;
3715
3716 len = min(mgr->max_dpcd_transaction_bytes, 16);
3717 ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len);
3718 if (ret != len) {
3719 DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
3720 return false;
3721 }
3722
3723 ret = drm_dp_decode_sideband_msg_hdr(&hdr, replyblock, len, &hdrlen);
3724 if (ret == false) {
3725 print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16,
3726 1, replyblock, len, false);
3727 DRM_DEBUG_KMS("ERROR: failed header\n");
3728 return false;
3729 }
3730
3731 if (!up) {
3732
3733 *mstb = drm_dp_get_mst_branch_device(mgr, hdr.lct, hdr.rad);
3734 if (!*mstb) {
3735 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
3736 hdr.lct);
3737 return false;
3738 }
3739 }
3740
3741 if (!drm_dp_sideband_msg_set_header(msg, &hdr, hdrlen)) {
3742 DRM_DEBUG_KMS("sideband msg set header failed %d\n",
3743 replyblock[0]);
3744 return false;
3745 }
3746
3747 replylen = min(msg->curchunk_len, (u8)(len - hdrlen));
3748 ret = drm_dp_sideband_append_payload(msg, replyblock + hdrlen, replylen);
3749 if (!ret) {
3750 DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
3751 return false;
3752 }
3753
3754 replylen = msg->curchunk_len + msg->curchunk_hdrlen - len;
3755 curreply = len;
3756 while (replylen > 0) {
3757 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
3758 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
3759 replyblock, len);
3760 if (ret != len) {
3761 DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
3762 len, ret);
3763 return false;
3764 }
3765
3766 ret = drm_dp_sideband_append_payload(msg, replyblock, len);
3767 if (!ret) {
3768 DRM_DEBUG_KMS("failed to build sideband msg\n");
3769 return false;
3770 }
3771
3772 curreply += len;
3773 replylen -= len;
3774 }
3775 return true;
3776}
3777
3778static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
3779{
3780 struct drm_dp_sideband_msg_tx *txmsg;
3781 struct drm_dp_mst_branch *mstb = NULL;
3782 struct drm_dp_sideband_msg_rx *msg = &mgr->down_rep_recv;
3783
3784 if (!drm_dp_get_one_sb_msg(mgr, false, &mstb))
3785 goto out;
3786
3787
3788 if (!msg->have_eomt)
3789 goto out;
3790
3791
3792 mutex_lock(&mgr->qlock);
3793 txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
3794 struct drm_dp_sideband_msg_tx, next);
3795 mutex_unlock(&mgr->qlock);
3796
3797
3798 if (!txmsg || txmsg->dst != mstb) {
3799 struct drm_dp_sideband_msg_hdr *hdr;
3800
3801 hdr = &msg->initial_hdr;
3802 DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
3803 mstb, hdr->seqno, hdr->lct, hdr->rad[0],
3804 msg->msg[0]);
3805 goto out_clear_reply;
3806 }
3807
3808 drm_dp_sideband_parse_reply(msg, &txmsg->reply);
3809
3810 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3811 DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
3812 txmsg->reply.req_type,
3813 drm_dp_mst_req_type_str(txmsg->reply.req_type),
3814 txmsg->reply.u.nak.reason,
3815 drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
3816 txmsg->reply.u.nak.nak_data);
3817 }
3818
3819 memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
3820 drm_dp_mst_topology_put_mstb(mstb);
3821
3822 mutex_lock(&mgr->qlock);
3823 txmsg->state = DRM_DP_SIDEBAND_TX_RX;
3824 list_del(&txmsg->next);
3825 mutex_unlock(&mgr->qlock);
3826
3827 wake_up_all(&mgr->tx_waitq);
3828
3829 return 0;
3830
3831out_clear_reply:
3832 memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
3833out:
3834 if (mstb)
3835 drm_dp_mst_topology_put_mstb(mstb);
3836
3837 return 0;
3838}
3839
3840static inline bool
3841drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
3842 struct drm_dp_pending_up_req *up_req)
3843{
3844 struct drm_dp_mst_branch *mstb = NULL;
3845 struct drm_dp_sideband_msg_req_body *msg = &up_req->msg;
3846 struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr;
3847 bool hotplug = false;
3848
3849 if (hdr->broadcast) {
3850 const u8 *guid = NULL;
3851
3852 if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY)
3853 guid = msg->u.conn_stat.guid;
3854 else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
3855 guid = msg->u.resource_stat.guid;
3856
3857 if (guid)
3858 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
3859 } else {
3860 mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
3861 }
3862
3863 if (!mstb) {
3864 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
3865 hdr->lct);
3866 return false;
3867 }
3868
3869
3870 if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
3871 drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
3872 hotplug = true;
3873 }
3874
3875 drm_dp_mst_topology_put_mstb(mstb);
3876 return hotplug;
3877}
3878
3879static void drm_dp_mst_up_req_work(struct work_struct *work)
3880{
3881 struct drm_dp_mst_topology_mgr *mgr =
3882 container_of(work, struct drm_dp_mst_topology_mgr,
3883 up_req_work);
3884 struct drm_dp_pending_up_req *up_req;
3885 bool send_hotplug = false;
3886
3887 mutex_lock(&mgr->probe_lock);
3888 while (true) {
3889 mutex_lock(&mgr->up_req_lock);
3890 up_req = list_first_entry_or_null(&mgr->up_req_list,
3891 struct drm_dp_pending_up_req,
3892 next);
3893 if (up_req)
3894 list_del(&up_req->next);
3895 mutex_unlock(&mgr->up_req_lock);
3896
3897 if (!up_req)
3898 break;
3899
3900 send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req);
3901 kfree(up_req);
3902 }
3903 mutex_unlock(&mgr->probe_lock);
3904
3905 if (send_hotplug)
3906 drm_kms_helper_hotplug_event(mgr->dev);
3907}
3908
3909static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
3910{
3911 struct drm_dp_pending_up_req *up_req;
3912
3913 if (!drm_dp_get_one_sb_msg(mgr, true, NULL))
3914 goto out;
3915
3916 if (!mgr->up_req_recv.have_eomt)
3917 return 0;
3918
3919 up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
3920 if (!up_req) {
3921 DRM_ERROR("Not enough memory to process MST up req\n");
3922 return -ENOMEM;
3923 }
3924 INIT_LIST_HEAD(&up_req->next);
3925
3926 drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg);
3927
3928 if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
3929 up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) {
3930 DRM_DEBUG_KMS("Received unknown up req type, ignoring: %x\n",
3931 up_req->msg.req_type);
3932 kfree(up_req);
3933 goto out;
3934 }
3935
3936 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
3937 false);
3938
3939 if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
3940 const struct drm_dp_connection_status_notify *conn_stat =
3941 &up_req->msg.u.conn_stat;
3942
3943 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
3944 conn_stat->port_number,
3945 conn_stat->legacy_device_plug_status,
3946 conn_stat->displayport_device_plug_status,
3947 conn_stat->message_capability_status,
3948 conn_stat->input_port,
3949 conn_stat->peer_device_type);
3950 } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
3951 const struct drm_dp_resource_status_notify *res_stat =
3952 &up_req->msg.u.resource_stat;
3953
3954 DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n",
3955 res_stat->port_number,
3956 res_stat->available_pbn);
3957 }
3958
3959 up_req->hdr = mgr->up_req_recv.initial_hdr;
3960 mutex_lock(&mgr->up_req_lock);
3961 list_add_tail(&up_req->next, &mgr->up_req_list);
3962 mutex_unlock(&mgr->up_req_lock);
3963 queue_work(system_long_wq, &mgr->up_req_work);
3964
3965out:
3966 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3967 return 0;
3968}
3980
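/**
 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
 * @mgr: manager to notify irq for.
 * @esi: 4 bytes from SINK_COUNT_ESI
 * @handled: whether the hpd interrupt was consumed or not
 *
 * This should be called from the driver when it detects a short IRQ,
 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
 * topology manager will process the sideband messages received as a result
 * of this.
 */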
3981int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
3982{
3983 int ret = 0;
3984 int sc;
3985 *handled = false;
3986 sc = esi[0] & 0x3f;
3987
3988 if (sc != mgr->sink_count) {
3989 mgr->sink_count = sc;
3990 *handled = true;
3991 }
3992
3993 if (esi[1] & DP_DOWN_REP_MSG_RDY) {
3994 ret = drm_dp_mst_handle_down_rep(mgr);
3995 *handled = true;
3996 }
3997
3998 if (esi[1] & DP_UP_REQ_MSG_RDY) {
3999 ret |= drm_dp_mst_handle_up_req(mgr);
4000 *handled = true;
4001 }
4002
4003 drm_dp_mst_kick_tx(mgr);
4004 return ret;
4005}
4006EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
4016
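/**
 * drm_dp_mst_detect_port() - get connection status for an MST port
 * @connector: DRM connector for this port
 * @ctx: The acquisition context to use for grabbing locks
 * @mgr: manager for this port
 * @port: pointer to a port
 *
 * This returns the current connection state for a port.
 */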
4017int
4018drm_dp_mst_detect_port(struct drm_connector *connector,
4019 struct drm_modeset_acquire_ctx *ctx,
4020 struct drm_dp_mst_topology_mgr *mgr,
4021 struct drm_dp_mst_port *port)
4022{
4023 int ret;
4024
4025
4026 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4027 if (!port)
4028 return connector_status_disconnected;
4029
4030 ret = drm_modeset_lock(&mgr->base.lock, ctx);
4031 if (ret)
4032 goto out;
4033
4034 ret = connector_status_disconnected;
4035
4036 if (!port->ddps)
4037 goto out;
4038
4039 switch (port->pdt) {
4040 case DP_PEER_DEVICE_NONE:
4041 case DP_PEER_DEVICE_MST_BRANCHING:
4042 if (!port->mcs)
4043 ret = connector_status_connected;
4044 break;
4045
4046 case DP_PEER_DEVICE_SST_SINK:
4047 ret = connector_status_connected;

		/* for logical ports (port number >= 8) - cache the EDID */
		if (port->port_num >= 8 && !port->cached_edid)
			port->cached_edid = drm_get_edid(connector,
							 &port->aux.ddc);
4052 break;
4053 case DP_PEER_DEVICE_DP_LEGACY_CONV:
4054 if (port->ldps)
4055 ret = connector_status_connected;
4056 break;
4057 }
4058out:
4059 drm_dp_mst_topology_put_port(port);
4060 return ret;
4061}
4062EXPORT_SYMBOL(drm_dp_mst_detect_port);
4073
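/**
 * drm_dp_mst_get_edid() - get EDID for an MST port
 * @connector: toplevel connector to get EDID for
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This returns an EDID for the port connected to a connector. It validates
 * that the port still exists, so the caller doesn't need to hold a reference.
 */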
4074struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4075{
4076 struct edid *edid = NULL;
4077
4078
4079 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4080 if (!port)
4081 return NULL;
4082
4083 if (port->cached_edid)
4084 edid = drm_edid_duplicate(port->cached_edid);
	else
		edid = drm_get_edid(connector, &port->aux.ddc);
4088 port->has_audio = drm_detect_monitor_audio(edid);
4089 drm_dp_mst_topology_put_port(port);
4090 return edid;
4091}
4092EXPORT_SYMBOL(drm_dp_mst_get_edid);
4105
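/**
 * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
 * @mgr: manager to use
 * @pbn: payload bandwidth to convert into slots.
 *
 * Calculate the number of VCPI slots that will be required for the given PBN
 * value.
 *
 * Returns: the total slots required for this port, or a negative error code.
 */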
4106int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
4107 int pbn)
4108{
4109 int num_slots;
4110
4111 num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
4112
4113
4114 if (num_slots > 63)
4115 return -ENOSPC;
4116 return num_slots;
4117}
4118EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
4119
4120static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4121 struct drm_dp_vcpi *vcpi, int pbn, int slots)
4122{
4123 int ret;
4124
4125
4126 if (slots > 63)
4127 return -ENOSPC;
4128
4129 vcpi->pbn = pbn;
4130 vcpi->aligned_pbn = slots * mgr->pbn_div;
4131 vcpi->num_slots = slots;
4132
4133 ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
4134 if (ret < 0)
4135 return ret;
4136 return 0;
4137}
4169
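/**
 * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
 * @state: global atomic state
 * @mgr: MST topology manager for the port
 * @port: port to find vcpi slots for
 * @pbn: bandwidth required for the mode in PBN
 * @pbn_div: divider to calculate the slots from the PBN value, 0 (or less)
 * means to use the default divider based on the link rate/lane count of @mgr
 *
 * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
 * may have had. Atomic drivers which support MST should call this from their
 * &drm_encoder_helper_funcs.atomic_check() callback to change the allocation,
 * and drm_dp_atomic_release_vcpi_slots() when the allocation is no longer
 * needed.
 *
 * Returns: Total slots in the atomic state assigned for this port, or a
 * negative error code on failure.
 */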
4170int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
4171 struct drm_dp_mst_topology_mgr *mgr,
4172 struct drm_dp_mst_port *port, int pbn,
4173 int pbn_div)
4174{
4175 struct drm_dp_mst_topology_state *topology_state;
4176 struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
4177 int prev_slots, prev_bw, req_slots;
4178
4179 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
4180 if (IS_ERR(topology_state))
4181 return PTR_ERR(topology_state);
4182
4183
4184 list_for_each_entry(pos, &topology_state->vcpis, next) {
4185 if (pos->port == port) {
4186 vcpi = pos;
4187 prev_slots = vcpi->vcpi;
4188 prev_bw = vcpi->pbn;
4189
4190
4191
4192
4193
4194
4195 if (WARN_ON(!prev_slots)) {
4196 DRM_ERROR("cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
4197 port);
4198 return -EINVAL;
4199 }
4200
4201 break;
4202 }
4203 }
4204 if (!vcpi) {
4205 prev_slots = 0;
4206 prev_bw = 0;
4207 }
4208
4209 if (pbn_div <= 0)
4210 pbn_div = mgr->pbn_div;
4211
4212 req_slots = DIV_ROUND_UP(pbn, pbn_div);
4213
4214 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
4215 port->connector->base.id, port->connector->name,
4216 port, prev_slots, req_slots);
4217 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n",
4218 port->connector->base.id, port->connector->name,
4219 port, prev_bw, pbn);
4220
4221
4222 if (!vcpi) {
4223 vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
4224 if (!vcpi)
4225 return -ENOMEM;
4226
4227 drm_dp_mst_get_port_malloc(port);
4228 vcpi->port = port;
4229 list_add(&vcpi->next, &topology_state->vcpis);
4230 }
4231 vcpi->vcpi = req_slots;
4232 vcpi->pbn = pbn;
4233
4234 return req_slots;
4235}
4236EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
4263
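/**
 * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
 * @state: global atomic state
 * @mgr: MST topology manager for the port
 * @port: The port to release the VCPI slots from
 *
 * Releases any VCPI slots that have been allocated to a port in the atomic
 * state. Atomic drivers which support MST should call this when the
 * connector will no longer have VCPI allocated (e.g. because its CRTC was
 * removed) when committing the state.
 *
 * Returns: 0 on success, or a negative error code on failure.
 */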
4264int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
4265 struct drm_dp_mst_topology_mgr *mgr,
4266 struct drm_dp_mst_port *port)
4267{
4268 struct drm_dp_mst_topology_state *topology_state;
4269 struct drm_dp_vcpi_allocation *pos;
4270 bool found = false;
4271
4272 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
4273 if (IS_ERR(topology_state))
4274 return PTR_ERR(topology_state);
4275
4276 list_for_each_entry(pos, &topology_state->vcpis, next) {
4277 if (pos->port == port) {
4278 found = true;
4279 break;
4280 }
4281 }
4282 if (WARN_ON(!found)) {
4283 DRM_ERROR("no VCPI for [MST PORT:%p] found in mst state %p\n",
4284 port, &topology_state->base);
4285 return -EINVAL;
4286 }
4287
4288 DRM_DEBUG_ATOMIC("[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
4289 if (pos->vcpi) {
4290 drm_dp_mst_put_port_malloc(port);
4291 pos->vcpi = 0;
4292 pos->pbn = 0;
4293 }
4294
4295 return 0;
4296}
4297EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);
4305
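/**
 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel for a port
 * @mgr: manager for this port
 * @port: port to allocate a virtual channel for.
 * @pbn: payload bandwidth number to request
 * @slots: number of time slots to allocate for this PBN
 *
 * Returns: true if the VCPI could be initialized (or was already allocated
 * with the same PBN), false otherwise.
 */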
4306bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4307 struct drm_dp_mst_port *port, int pbn, int slots)
4308{
4309 int ret;
4310
4311 if (slots < 0)
4312 return false;
4313
4314 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4315 if (!port)
4316 return false;
4317
4318 if (port->vcpi.vcpi > 0) {
4319 DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
4320 port->vcpi.vcpi, port->vcpi.pbn, pbn);
4321 if (pbn == port->vcpi.pbn) {
4322 drm_dp_mst_topology_put_port(port);
4323 return true;
4324 }
4325 }
4326
4327 ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
4328 if (ret) {
4329 DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
4330 DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
4331 drm_dp_mst_topology_put_port(port);
4332 goto out;
4333 }
4334 DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
4335 pbn, port->vcpi.num_slots);
4336
4337
4338 drm_dp_mst_get_port_malloc(port);
4339 drm_dp_mst_topology_put_port(port);
4340 return true;
4341out:
4342 return false;
4343}
4344EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
4345
4346int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4347{
4348 int slots = 0;
4349
4350 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4351 if (!port)
4352 return slots;
4353
4354 slots = port->vcpi.num_slots;
4355 drm_dp_mst_topology_put_port(port);
4356 return slots;
4357}
4358EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
4366
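/**
 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This just resets the number of slots for the port's VCPI for later
 * repopulation.
 */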
4367void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4368{
	/*
	 * A port with an allocated VCPI keeps its payload ID until
	 * drm_dp_mst_deallocate_vcpi() is called, so only the slot count is
	 * cleared here for later repopulation.
	 */
4374 port->vcpi.num_slots = 0;
4375}
4376EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
4385
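/**
 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
 * @mgr: manager for this port
 * @port: port to deallocate vcpi for
 *
 * This can be called unconditionally, regardless of whether
 * drm_dp_mst_allocate_vcpi() succeeded or not.
 */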
4386void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4387 struct drm_dp_mst_port *port)
4388{
4389 if (!port->vcpi.vcpi)
4390 return;
4391
4392 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
4393 port->vcpi.num_slots = 0;
4394 port->vcpi.pbn = 0;
4395 port->vcpi.aligned_pbn = 0;
4396 port->vcpi.vcpi = 0;
4397 drm_dp_mst_put_port_malloc(port);
4398}
4399EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
4400
4401static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
4402 int id, struct drm_dp_payload *payload)
4403{
4404 u8 payload_alloc[3], status;
4405 int ret;
4406 int retries = 0;
4407
4408 drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
4409 DP_PAYLOAD_TABLE_UPDATED);
4410
4411 payload_alloc[0] = id;
4412 payload_alloc[1] = payload->start_slot;
4413 payload_alloc[2] = payload->num_slots;
4414
4415 ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
4416 if (ret != 3) {
4417 DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
4418 goto fail;
4419 }
4420
4421retry:
4422 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
4423 if (ret < 0) {
4424 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
4425 goto fail;
4426 }
4427
4428 if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
4429 retries++;
4430 if (retries < 20) {
4431 usleep_range(10000, 20000);
4432 goto retry;
4433 }
4434 DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
4435 ret = -EINVAL;
4436 goto fail;
4437 }
4438 ret = 0;
4439fail:
4440 return ret;
4441}
4442
4443static int do_get_act_status(struct drm_dp_aux *aux)
4444{
4445 int ret;
4446 u8 status;
4447
4448 ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
4449 if (ret < 0)
4450 return ret;
4451
4452 return status;
4453}
4465
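/**
 * drm_dp_check_act_status() - Polls for ACT handled status.
 * @mgr: manager to use
 *
 * Tries waiting for the MST hub to finish updating its payload table by
 * polling for the ACT handled bit, for up to 3 seconds (some hubs really do
 * take that long).
 *
 * Returns: 0 if the ACT was handled in time, negative error code on failure.
 */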
4466int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
4467{
	/*
	 * There doesn't seem to be any recommended retry count or timeout in
	 * the DP spec, so use a fairly generous 3 second timeout and a 200us
	 * polling interval to avoid hogging the CPU.
	 */
4474 const int timeout_ms = 3000;
4475 int ret, status;
4476
4477 ret = readx_poll_timeout(do_get_act_status, mgr->aux, status,
4478 status & DP_PAYLOAD_ACT_HANDLED || status < 0,
4479 200, timeout_ms * USEC_PER_MSEC);
4480 if (ret < 0 && status >= 0) {
4481 DRM_ERROR("Failed to get ACT after %dms, last status: %02x\n",
4482 timeout_ms, status);
4483 return -EINVAL;
4484 } else if (status < 0) {
4485
4486
4487
4488
4489 DRM_DEBUG_KMS("Failed to read payload table status: %d\n",
4490 status);
4491 return status;
4492 }
4493
4494 return 0;
4495}
4496EXPORT_SYMBOL(drm_dp_check_act_status);
4505
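/**
 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
 * @clock: dot clock for the mode
 * @bpp: bpp for the mode.
 * @dsc: DSC mode. If true, bpp has units of 1/16 of a bit per pixel
 *
 * This uses the formula in the spec to calculate the PBN value for a mode.
 * For example, a 297 MHz (297000 kHz) pixel clock at 24 bpp works out to
 * DIV_ROUND_UP(297000 * 24 * 64 * 1006, 8 * 54 * 1000 * 1000) = 1063 PBN.
 */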
4506int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc)
4507{
	/*
	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
	 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
	 * common multiplier to render an integer PBN for all link rate/lane
	 * counts combinations
	 * calculate
	 * peak_kbps *= (1006/1000)
	 * peak_kbps *= (64/54)
	 * peak_kbps *= 8    convert to bytes
	 *
	 * If the bpp is in units of 1/16, further divide by 16. Put this
	 * factor in the numerator rather than the denominator to avoid
	 * integer overflow
	 */
4523 if (dsc)
4524 return DIV_ROUND_UP_ULL(mul_u32_u32(clock * (bpp / 16), 64 * 1006),
4525 8 * 54 * 1000 * 1000);
4526
4527 return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
4528 8 * 54 * 1000 * 1000);
4529}
4530EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
4531
4532
4533static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
4534{
4535 queue_work(system_long_wq, &mgr->tx_work);
4536}
4537
4538static void drm_dp_mst_dump_mstb(struct seq_file *m,
4539 struct drm_dp_mst_branch *mstb)
4540{
4541 struct drm_dp_mst_port *port;
4542 int tabs = mstb->lct;
4543 char prefix[10];
4544 int i;
4545
4546 for (i = 0; i < tabs; i++)
4547 prefix[i] = '\t';
4548 prefix[i] = '\0';
4549
4550 seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
4551 list_for_each_entry(port, &mstb->ports, next) {
4552 seq_printf(m, "%sport: %d: input: %d: pdt: %d, ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n", prefix, port->port_num, port->input, port->pdt, port->ddps, port->ldps, port->num_sdp_streams, port->num_sdp_stream_sinks, port, port->connector);
4553 if (port->mstb)
4554 drm_dp_mst_dump_mstb(m, port->mstb);
4555 }
4556}
4557
4558#define DP_PAYLOAD_TABLE_SIZE 64
4559
4560static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
4561 char *buf)
4562{
4563 int i;
4564
4565 for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
4566 if (drm_dp_dpcd_read(mgr->aux,
4567 DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
4568 &buf[i], 16) != 16)
4569 return false;
4570 }
4571 return true;
4572}
4573
4574static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
4575 struct drm_dp_mst_port *port, char *name,
4576 int namelen)
4577{
4578 struct edid *mst_edid;
4579
4580 mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
4581 drm_edid_get_monitor_name(mst_edid, name, namelen);
4582}
4590
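/**
 * drm_dp_mst_dump_topology(): dump topology to seq file.
 * @m: seq_file to dump output to
 * @mgr: manager to dump current topology for.
 *
 * Helper to dump MST topology to a seq file for debugfs.
 */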
4591void drm_dp_mst_dump_topology(struct seq_file *m,
4592 struct drm_dp_mst_topology_mgr *mgr)
4593{
4594 int i;
4595 struct drm_dp_mst_port *port;
4596
4597 mutex_lock(&mgr->lock);
4598 if (mgr->mst_primary)
4599 drm_dp_mst_dump_mstb(m, mgr->mst_primary);
4600
4601
4602 mutex_unlock(&mgr->lock);
4603
4604 mutex_lock(&mgr->payload_lock);
4605 seq_printf(m, "vcpi: %lx %lx %d\n", mgr->payload_mask, mgr->vcpi_mask,
4606 mgr->max_payloads);
4607
4608 for (i = 0; i < mgr->max_payloads; i++) {
4609 if (mgr->proposed_vcpis[i]) {
4610 char name[14];
4611
4612 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
4613 fetch_monitor_name(mgr, port, name, sizeof(name));
4614 seq_printf(m, "vcpi %d: %d %d %d sink name: %s\n", i,
4615 port->port_num, port->vcpi.vcpi,
4616 port->vcpi.num_slots,
4617 (*name != 0) ? name : "Unknown");
		} else {
			seq_printf(m, "vcpi %d: unused\n", i);
		}
4620 }
4621 for (i = 0; i < mgr->max_payloads; i++) {
4622 seq_printf(m, "payload %d: %d, %d, %d\n",
4623 i,
4624 mgr->payloads[i].payload_state,
4625 mgr->payloads[i].start_slot,
4626 mgr->payloads[i].num_slots);
4627
4628
4629 }
4630 mutex_unlock(&mgr->payload_lock);
4631
4632 mutex_lock(&mgr->lock);
4633 if (mgr->mst_primary) {
4634 u8 buf[DP_PAYLOAD_TABLE_SIZE];
4635 int ret;
4636
4637 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
		if (ret != DP_RECEIVER_CAP_SIZE) {
4639 seq_printf(m, "dpcd read failed\n");
4640 goto out;
4641 }
4642 seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
4643
4644 ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
		if (ret != 2) {
4646 seq_printf(m, "faux/mst read failed\n");
4647 goto out;
4648 }
4649 seq_printf(m, "faux/mst: %*ph\n", 2, buf);
4650
4651 ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
		if (ret != 1) {
4653 seq_printf(m, "mst ctrl read failed\n");
4654 goto out;
4655 }
4656 seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
4657
4658
4659 ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
		if (ret != DP_BRANCH_OUI_HEADER_SIZE) {
4661 seq_printf(m, "branch oui read failed\n");
4662 goto out;
4663 }
4664 seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
4665
4666 for (i = 0x3; i < 0x8 && buf[i]; i++)
4667 seq_printf(m, "%c", buf[i]);
4668 seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
4669 buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
4670 if (dump_dp_payload_table(mgr, buf))
4671 seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
4672 }
4673
4674out:
4675 mutex_unlock(&mgr->lock);
4676
4677}
4678EXPORT_SYMBOL(drm_dp_mst_dump_topology);
4679
4680static void drm_dp_tx_work(struct work_struct *work)
4681{
4682 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
4683
4684 mutex_lock(&mgr->qlock);
4685 if (!list_empty(&mgr->tx_msg_downq))
4686 process_single_down_tx_qlock(mgr);
4687 mutex_unlock(&mgr->qlock);
4688}
4689
4690static inline void
4691drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
4692{
4693 drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
4694
4695 if (port->connector) {
4696 drm_connector_unregister(port->connector);
4697 drm_connector_put(port->connector);
4698 }
4699
4700 drm_dp_mst_put_port_malloc(port);
4701}
4702
4703static inline void
4704drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb)
4705{
4706 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
4707 struct drm_dp_mst_port *port, *port_tmp;
4708 struct drm_dp_sideband_msg_tx *txmsg, *txmsg_tmp;
4709 bool wake_tx = false;
4710
4711 mutex_lock(&mgr->lock);
4712 list_for_each_entry_safe(port, port_tmp, &mstb->ports, next) {
4713 list_del(&port->next);
4714 drm_dp_mst_topology_put_port(port);
4715 }
4716 mutex_unlock(&mgr->lock);
4717
4718
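	/* Drop any sideband messages still queued for this branch device */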
4719 mutex_lock(&mstb->mgr->qlock);
4720 list_for_each_entry_safe(txmsg, txmsg_tmp, &mgr->tx_msg_downq, next) {
4721 if (txmsg->dst != mstb)
4722 continue;
4723
4724 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
4725 list_del(&txmsg->next);
4726 wake_tx = true;
4727 }
4728 mutex_unlock(&mstb->mgr->qlock);
4729
4730 if (wake_tx)
4731 wake_up_all(&mstb->mgr->tx_waitq);
4732
4733 drm_dp_mst_put_mstb_malloc(mstb);
4734}
4735
4736static void drm_dp_delayed_destroy_work(struct work_struct *work)
4737{
4738 struct drm_dp_mst_topology_mgr *mgr =
4739 container_of(work, struct drm_dp_mst_topology_mgr,
4740 delayed_destroy_work);
4741 bool send_hotplug = false, go_again;
4742
4743
4744
4745
4746
4747
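	/*
	 * Drain the destroy lists one entry at a time, dropping
	 * delayed_destroy_lock before each mstb/port is torn down so that the
	 * destruction paths can take the topology locks without creating a
	 * lock-ordering dependency on this lock.
	 */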
4748 do {
4749 go_again = false;
4750
4751 for (;;) {
4752 struct drm_dp_mst_branch *mstb;
4753
4754 mutex_lock(&mgr->delayed_destroy_lock);
4755 mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list,
4756 struct drm_dp_mst_branch,
4757 destroy_next);
4758 if (mstb)
4759 list_del(&mstb->destroy_next);
4760 mutex_unlock(&mgr->delayed_destroy_lock);
4761
4762 if (!mstb)
4763 break;
4764
4765 drm_dp_delayed_destroy_mstb(mstb);
4766 go_again = true;
4767 }
4768
4769 for (;;) {
4770 struct drm_dp_mst_port *port;
4771
4772 mutex_lock(&mgr->delayed_destroy_lock);
4773 port = list_first_entry_or_null(&mgr->destroy_port_list,
4774 struct drm_dp_mst_port,
4775 next);
4776 if (port)
4777 list_del(&port->next);
4778 mutex_unlock(&mgr->delayed_destroy_lock);
4779
4780 if (!port)
4781 break;
4782
4783 drm_dp_delayed_destroy_port(port);
4784 send_hotplug = true;
4785 go_again = true;
4786 }
4787 } while (go_again);
4788
4789 if (send_hotplug)
4790 drm_kms_helper_hotplug_event(mgr->dev);
4791}
4792
4793static struct drm_private_state *
4794drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
4795{
4796 struct drm_dp_mst_topology_state *state, *old_state =
4797 to_dp_mst_topology_state(obj->state);
4798 struct drm_dp_vcpi_allocation *pos, *vcpi;
4799
4800 state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
4801 if (!state)
4802 return NULL;
4803
4804 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
4805
4806 INIT_LIST_HEAD(&state->vcpis);
4807
4808 list_for_each_entry(pos, &old_state->vcpis, next) {
4809
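		/* Prune leftover freed VCPI allocations */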
4810 if (!pos->vcpi)
4811 continue;
4812
4813 vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL);
4814 if (!vcpi)
4815 goto fail;
4816
4817 drm_dp_mst_get_port_malloc(vcpi->port);
4818 list_add(&vcpi->next, &state->vcpis);
4819 }
4820
4821 return &state->base;
4822
4823fail:
4824 list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) {
4825 drm_dp_mst_put_port_malloc(pos->port);
4826 kfree(pos);
4827 }
4828 kfree(state);
4829
4830 return NULL;
4831}
4832
4833static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
4834 struct drm_private_state *state)
4835{
4836 struct drm_dp_mst_topology_state *mst_state =
4837 to_dp_mst_topology_state(state);
4838 struct drm_dp_vcpi_allocation *pos, *tmp;
4839
4840 list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) {
4841
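		/* We only keep references to ports with non-zero VCPIs */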
4842 if (pos->vcpi)
4843 drm_dp_mst_put_port_malloc(pos->port);
4844 kfree(pos);
4845 }
4846
4847 kfree(mst_state);
4848}
4849
4850static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
4851 struct drm_dp_mst_branch *branch)
4852{
4853 while (port->parent) {
4854 if (port->parent == branch)
4855 return true;
4856
4857 if (port->parent->port_parent)
4858 port = port->parent->port_parent;
4859 else
4860 break;
4861 }
4862 return false;
4863}
4864
4865static int
4866drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
4867 struct drm_dp_mst_topology_state *state);
4868
4869static int
4870drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
4871 struct drm_dp_mst_topology_state *state)
4872{
4873 struct drm_dp_vcpi_allocation *vcpi;
4874 struct drm_dp_mst_port *port;
4875 int pbn_used = 0, ret;
4876 bool found = false;
4877
4878
4879
4880
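	/*
	 * Check that we have at least one port in our state that's downstream
	 * of this branch, otherwise we can skip this branch
	 */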
4881 list_for_each_entry(vcpi, &state->vcpis, next) {
4882 if (!vcpi->pbn ||
4883 !drm_dp_mst_port_downstream_of_branch(vcpi->port, mstb))
4884 continue;
4885
4886 found = true;
4887 break;
4888 }
4889 if (!found)
4890 return 0;
4891
4892 if (mstb->port_parent)
4893 DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] Checking bandwidth limits on [MSTB:%p]\n",
4894 mstb->port_parent->parent, mstb->port_parent,
4895 mstb);
4896 else
4897 DRM_DEBUG_ATOMIC("[MSTB:%p] Checking bandwidth limits\n",
4898 mstb);
4899
4900 list_for_each_entry(port, &mstb->ports, next) {
4901 ret = drm_dp_mst_atomic_check_port_bw_limit(port, state);
4902 if (ret < 0)
4903 return ret;
4904
4905 pbn_used += ret;
4906 }
4907
4908 return pbn_used;
4909}
4910
4911static int
4912drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
4913 struct drm_dp_mst_topology_state *state)
4914{
4915 struct drm_dp_vcpi_allocation *vcpi;
4916 int pbn_used = 0;
4917
4918 if (port->pdt == DP_PEER_DEVICE_NONE)
4919 return 0;
4920
4921 if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
4922 bool found = false;
4923
4924 list_for_each_entry(vcpi, &state->vcpis, next) {
4925 if (vcpi->port != port)
4926 continue;
4927 if (!vcpi->pbn)
4928 return 0;
4929
4930 found = true;
4931 break;
4932 }
4933 if (!found)
4934 return 0;
4935
4936
4937
4938
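		/*
		 * An end device with a VCPI allocation should have reported
		 * its available PBN when its path resources were enumerated;
		 * a zero full_pbn here indicates an inconsistent topology
		 * state.
		 */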
4939 if (WARN_ON(!port->full_pbn))
4940 return -EINVAL;
4941
4942 pbn_used = vcpi->pbn;
4943 } else {
4944 pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb,
4945 state);
4946 if (pbn_used <= 0)
4947 return pbn_used;
4948 }
4949
4950 if (pbn_used > port->full_pbn) {
4951 DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n",
4952 port->parent, port, pbn_used,
4953 port->full_pbn);
4954 return -ENOSPC;
4955 }
4956
4957 DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n",
4958 port->parent, port, pbn_used, port->full_pbn);
4959
4960 return pbn_used;
4961}
4962
4963static inline int
4964drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr,
4965 struct drm_dp_mst_topology_state *mst_state)
4966{
4967 struct drm_dp_vcpi_allocation *vcpi;
4968 int avail_slots = 63, payload_count = 0;
4969
4970 list_for_each_entry(vcpi, &mst_state->vcpis, next) {
4971
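		/* Releasing VCPI is always OK, even if the port is gone */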
4972 if (!vcpi->vcpi) {
4973 DRM_DEBUG_ATOMIC("[MST PORT:%p] releases all VCPI slots\n",
4974 vcpi->port);
4975 continue;
4976 }
4977
4978 DRM_DEBUG_ATOMIC("[MST PORT:%p] requires %d vcpi slots\n",
4979 vcpi->port, vcpi->vcpi);
4980
4981 avail_slots -= vcpi->vcpi;
4982 if (avail_slots < 0) {
4983 DRM_DEBUG_ATOMIC("[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
4984 vcpi->port, mst_state,
4985 avail_slots + vcpi->vcpi);
4986 return -ENOSPC;
4987 }
4988
4989 if (++payload_count > mgr->max_payloads) {
4990 DRM_DEBUG_ATOMIC("[MST MGR:%p] state %p has too many payloads (max=%d)\n",
4991 mgr, mst_state, mgr->max_payloads);
4992 return -EINVAL;
4993 }
4994 }
4995 DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
4996 mgr, mst_state, avail_slots,
4997 63 - avail_slots);
4998
4999 return 0;
5000}
5001
5002
5003
5004
5005
5006
5007
5008
5009
5010
5011
5012
5013
5014
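/**
 * drm_dp_mst_add_affected_dsc_crtcs - Add all affected CRTCs in an MST topology to the atomic state
 * @state: Pointer to the new struct drm_atomic_state
 * @mgr: MST topology manager
 *
 * Whenever there is a change in MST topology the DSC configuration may need
 * to be recalculated, so set the mode_changed flag on every CRTC driving a
 * DSC-capable port in the given topology to force a full modeset on them.
 *
 * Returns 0 on success, or a negative error code on failure.
 */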
5015int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr)
5016{
5017 struct drm_dp_mst_topology_state *mst_state;
5018 struct drm_dp_vcpi_allocation *pos;
5019 struct drm_connector *connector;
5020 struct drm_connector_state *conn_state;
5021 struct drm_crtc *crtc;
5022 struct drm_crtc_state *crtc_state;
5023
5024 mst_state = drm_atomic_get_mst_topology_state(state, mgr);
5025
5026 if (IS_ERR(mst_state))
5027 return -EINVAL;
5028
5029 list_for_each_entry(pos, &mst_state->vcpis, next) {
5030
5031 connector = pos->port->connector;
5032
5033 if (!connector)
5034 return -EINVAL;
5035
5036 conn_state = drm_atomic_get_connector_state(state, connector);
5037
5038 if (IS_ERR(conn_state))
5039 return PTR_ERR(conn_state);
5040
5041 crtc = conn_state->crtc;
5042
5043 if (!crtc)
5044 continue;
5045
5046 if (!drm_dp_mst_dsc_aux_for_port(pos->port))
5047 continue;
5048
5049 crtc_state = drm_atomic_get_crtc_state(mst_state->base.state, crtc);
5050
5051 if (IS_ERR(crtc_state))
5052 return PTR_ERR(crtc_state);
5053
5054 DRM_DEBUG_ATOMIC("[MST MGR:%p] Setting mode_changed flag on CRTC %p\n",
5055 mgr, crtc);
5056
5057 crtc_state->mode_changed = true;
5058 }
5059 return 0;
5060}
5061EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs);
5062
5063
5064
5065
5066
5067
5068
5069
5070
5071
5072
5073
5074
5075
5076
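/**
 * drm_dp_mst_atomic_enable_dsc - Set the DSC enable flag on an MST port
 * @state: Pointer to the new drm_atomic_state
 * @port: Pointer to the affected MST port
 * @pbn: Newly recalculated bandwidth required for the link with DSC enabled
 * @pbn_div: Divider to calculate the correct number of PBN per slot
 * @enable: Boolean flag to enable or disable DSC on the port
 *
 * This function enables DSC on the given port by recalculating its VCPI
 * allocation from the provided PBN, and records the dsc_enabled flag to keep
 * track of which ports have DSC enabled.
 *
 * Returns the number of VCPI slots allocated for the port, or a negative
 * error code on failure.
 */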
5077int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
5078 struct drm_dp_mst_port *port,
5079 int pbn, int pbn_div,
5080 bool enable)
5081{
5082 struct drm_dp_mst_topology_state *mst_state;
5083 struct drm_dp_vcpi_allocation *pos;
5084 bool found = false;
5085 int vcpi = 0;
5086
5087 mst_state = drm_atomic_get_mst_topology_state(state, port->mgr);
5088
5089 if (IS_ERR(mst_state))
5090 return PTR_ERR(mst_state);
5091
5092 list_for_each_entry(pos, &mst_state->vcpis, next) {
5093 if (pos->port == port) {
5094 found = true;
5095 break;
5096 }
5097 }
5098
5099 if (!found) {
5100 DRM_DEBUG_ATOMIC("[MST PORT:%p] Couldn't find VCPI allocation in mst state %p\n",
5101 port, mst_state);
5102 return -EINVAL;
5103 }
5104
5105 if (pos->dsc_enabled == enable) {
5106 DRM_DEBUG_ATOMIC("[MST PORT:%p] DSC flag is already set to %d, returning %d VCPI slots\n",
5107 port, enable, pos->vcpi);
5108 vcpi = pos->vcpi;
5109 }
5110
5111 if (enable) {
5112 vcpi = drm_dp_atomic_find_vcpi_slots(state, port->mgr, port, pbn, pbn_div);
5113 DRM_DEBUG_ATOMIC("[MST PORT:%p] Enabling DSC flag, reallocating %d VCPI slots on the port\n",
5114 port, vcpi);
5115 if (vcpi < 0)
5116 return -EINVAL;
5117 }
5118
5119 pos->dsc_enabled = enable;
5120
5121 return vcpi;
5122}
5123EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
5124
5125
5126
5127
5128
5129
5130
5131
5132
5133
5134
5135
5136
5137
5138
5139
5140
5141
5142
5143
5144
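/**
 * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
 * atomic update is valid
 * @state: Pointer to the new &struct drm_atomic_state
 *
 * Checks the given topology state for an atomic update to ensure that it's
 * valid. This includes checking whether there's enough bandwidth to support
 * the new VCPI allocations in the given topology.
 *
 * Any atomic drivers supporting DP MST must make sure to call this after
 * checking the rest of their state in their
 * &drm_mode_config_funcs.atomic_check() callback.
 *
 * Returns:
 * 0 if the new state is valid, negative error code otherwise.
 */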
5145int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
5146{
5147 struct drm_dp_mst_topology_mgr *mgr;
5148 struct drm_dp_mst_topology_state *mst_state;
5149 int i, ret = 0;
5150
5151 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
5152 if (!mgr->mst_state)
5153 continue;
5154
5155 ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state);
5156 if (ret)
5157 break;
5158
5159 mutex_lock(&mgr->lock);
5160 ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary,
5161 mst_state);
5162 mutex_unlock(&mgr->lock);
5163 if (ret < 0)
5164 break;
5165 else
5166 ret = 0;
5167 }
5168
5169 return ret;
5170}
5171EXPORT_SYMBOL(drm_dp_mst_atomic_check);
5172
5173const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
5174 .atomic_duplicate_state = drm_dp_mst_duplicate_state,
5175 .atomic_destroy_state = drm_dp_mst_destroy_state,
5176};
5177EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
5178
5179
5180
5181
5182
5183
5184
5185
5186
5187
5188
5189
5190
5191
5192
5193
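/**
 * drm_atomic_get_mst_topology_state: get MST topology state
 * @state: global atomic state
 * @mgr: MST topology manager, also the private object in this case
 *
 * This function wraps drm_atomic_get_private_obj_state(), passing in the MST
 * atomic state vtable so that the private object state returned is that of an
 * MST topology object.
 *
 * Returns:
 * The MST topology state, or an error pointer on failure.
 */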
5194struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
5195 struct drm_dp_mst_topology_mgr *mgr)
5196{
5197 return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
5198}
5199EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
5200
5201
5202
5203
5204
5205
5206
5207
5208
5209
5210
5211
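/**
 * drm_dp_mst_topology_mgr_init - initialise a topology manager
 * @mgr: manager struct to initialise
 * @dev: device providing this structure - for i2c addition.
 * @aux: DP helper aux channel to talk to this device
 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
 * @max_payloads: maximum number of payloads this GPU can source
 * @conn_base_id: the connector object ID the MST device is connected to.
 *
 * Return 0 for success, or negative error code on failure
 */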
5212int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
5213 struct drm_device *dev, struct drm_dp_aux *aux,
5214 int max_dpcd_transaction_bytes,
5215 int max_payloads, int conn_base_id)
5216{
5217 struct drm_dp_mst_topology_state *mst_state;
5218
5219 mutex_init(&mgr->lock);
5220 mutex_init(&mgr->qlock);
5221 mutex_init(&mgr->payload_lock);
5222 mutex_init(&mgr->delayed_destroy_lock);
5223 mutex_init(&mgr->up_req_lock);
5224 mutex_init(&mgr->probe_lock);
5225#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
5226 mutex_init(&mgr->topology_ref_history_lock);
5227#endif
5228 INIT_LIST_HEAD(&mgr->tx_msg_downq);
5229 INIT_LIST_HEAD(&mgr->destroy_port_list);
5230 INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
5231 INIT_LIST_HEAD(&mgr->up_req_list);
5232
5233
5234
5235
5236
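	/*
	 * delayed_destroy_work is queued on a dedicated ordered workqueue, so
	 * that any requeuing is also flushed when the topology manager is
	 * torn down.
	 */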
5237 mgr->delayed_destroy_wq = alloc_ordered_workqueue("drm_dp_mst_wq", 0);
5238 if (mgr->delayed_destroy_wq == NULL)
5239 return -ENOMEM;
5240
5241 INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
5242 INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
5243 INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);
5244 INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work);
5245 init_waitqueue_head(&mgr->tx_waitq);
5246 mgr->dev = dev;
5247 mgr->aux = aux;
5248 mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
5249 mgr->max_payloads = max_payloads;
5250 mgr->conn_base_id = conn_base_id;
5251 if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
5252 max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
5253 return -EINVAL;
5254 mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
5255 if (!mgr->payloads)
5256 return -ENOMEM;
5257 mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
5258 if (!mgr->proposed_vcpis)
5259 return -ENOMEM;
5260 set_bit(0, &mgr->payload_mask);
5261
5262 mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
5263 if (mst_state == NULL)
5264 return -ENOMEM;
5265
5266 mst_state->mgr = mgr;
5267 INIT_LIST_HEAD(&mst_state->vcpis);
5268
5269 drm_atomic_private_obj_init(dev, &mgr->base,
5270 &mst_state->base,
5271 &drm_dp_mst_topology_state_funcs);
5272
5273 return 0;
5274}
5275EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
5276
5277
5278
5279
5280
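/**
 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
 * @mgr: manager to destroy
 */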
5281void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
5282{
5283 drm_dp_mst_topology_mgr_set_mst(mgr, false);
5284 flush_work(&mgr->work);
5285
5286 if (mgr->delayed_destroy_wq) {
5287 destroy_workqueue(mgr->delayed_destroy_wq);
5288 mgr->delayed_destroy_wq = NULL;
5289 }
5290 mutex_lock(&mgr->payload_lock);
5291 kfree(mgr->payloads);
5292 mgr->payloads = NULL;
5293 kfree(mgr->proposed_vcpis);
5294 mgr->proposed_vcpis = NULL;
5295 mutex_unlock(&mgr->payload_lock);
5296 mgr->dev = NULL;
5297 mgr->aux = NULL;
5298 drm_atomic_private_obj_fini(&mgr->base);
5299 mgr->funcs = NULL;
5300
5301 mutex_destroy(&mgr->delayed_destroy_lock);
5302 mutex_destroy(&mgr->payload_lock);
5303 mutex_destroy(&mgr->qlock);
5304 mutex_destroy(&mgr->lock);
5305 mutex_destroy(&mgr->up_req_lock);
5306 mutex_destroy(&mgr->probe_lock);
5307#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
5308 mutex_destroy(&mgr->topology_ref_history_lock);
5309#endif
5310}
5311EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
5312
5313static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
5314{
5315 int i;
5316
5317 if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
5318 return false;
5319
5320 for (i = 0; i < num - 1; i++) {
5321 if (msgs[i].flags & I2C_M_RD ||
5322 msgs[i].len > 0xff)
5323 return false;
5324 }
5325
5326 return msgs[num - 1].flags & I2C_M_RD &&
5327 msgs[num - 1].len <= 0xff;
5328}
5329
5330
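/* I2C transfer over the AUX channel via REMOTE_I2C_READ sideband messages */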
5331static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
5332 int num)
5333{
5334 struct drm_dp_aux *aux = adapter->algo_data;
5335 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
5336 struct drm_dp_mst_branch *mstb;
5337 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
5338 unsigned int i;
5339 struct drm_dp_sideband_msg_req_body msg;
5340 struct drm_dp_sideband_msg_tx *txmsg = NULL;
5341 int ret;
5342
5343 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
5344 if (!mstb)
5345 return -EREMOTEIO;
5346
5347 if (!remote_i2c_read_ok(msgs, num)) {
5348 DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
5349 ret = -EIO;
5350 goto out;
5351 }
5352
5353 memset(&msg, 0, sizeof(msg));
5354 msg.req_type = DP_REMOTE_I2C_READ;
5355 msg.u.i2c_read.num_transactions = num - 1;
5356 msg.u.i2c_read.port_number = port->port_num;
5357 for (i = 0; i < num - 1; i++) {
5358 msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
5359 msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
5360 msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
5361 msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
5362 }
5363 msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
5364 msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
5365
5366 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
5367 if (!txmsg) {
5368 ret = -ENOMEM;
5369 goto out;
5370 }
5371
5372 txmsg->dst = mstb;
5373 drm_dp_encode_sideband_req(&msg, txmsg);
5374
5375 drm_dp_queue_down_tx(mgr, txmsg);
5376
5377 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
5378 if (ret > 0) {
5379
5380 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
5381 ret = -EREMOTEIO;
5382 goto out;
5383 }
5384 if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
5385 ret = -EIO;
5386 goto out;
5387 }
5388 memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
5389 ret = num;
5390 }
5391out:
5392 kfree(txmsg);
5393 drm_dp_mst_topology_put_mstb(mstb);
5394 return ret;
5395}
5396
5397static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
5398{
5399 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
5400 I2C_FUNC_SMBUS_READ_BLOCK_DATA |
5401 I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
5402 I2C_FUNC_10BIT_ADDR;
5403}
5404
5405static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
5406 .functionality = drm_dp_mst_i2c_functionality,
5407 .master_xfer = drm_dp_mst_i2c_xfer,
5408};
5409
5410
5411
5412
5413
5414
5415
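/**
 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
 * @port: The port to add the I2C bus on
 *
 * Returns 0 on success or a negative error code on failure.
 */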
5416static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port)
5417{
5418 struct drm_dp_aux *aux = &port->aux;
5419 struct device *parent_dev = port->mgr->dev->dev;
5420
5421 aux->ddc.algo = &drm_dp_mst_i2c_algo;
5422 aux->ddc.algo_data = aux;
5423 aux->ddc.retries = 3;
5424
5425 aux->ddc.class = I2C_CLASS_DDC;
5426 aux->ddc.owner = THIS_MODULE;
5427
5428 aux->ddc.dev.parent = parent_dev;
5429 aux->ddc.dev.of_node = parent_dev->of_node;
5430
5431 strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(parent_dev),
5432 sizeof(aux->ddc.name));
5433
5434 return i2c_add_adapter(&aux->ddc);
5435}
5436
5437
5438
5439
5440
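/**
 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
 * @port: The port to remove the I2C bus from
 */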
5441static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port)
5442{
5443 i2c_del_adapter(&port->aux.ddc);
5444}
5445
5446
5447
5448
5449
5450
5451
5452
5453
5454
5455
5456
5457
5458
5459
5460
5461
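/**
 * drm_dp_mst_is_virtual_dpcd() - Is the given port a virtual DP Peer Device
 * @port: The port to check
 *
 * A single physical MST hub object can be represented in the topology
 * by multiple branches, with virtual ports between those branches.
 *
 * As of DP 1.4, an MST hub with internal (virtual) ports must expose
 * certain DPCD registers over those ports (see the DP 1.4 specification
 * on virtual DP peer devices).
 *
 * May acquire mgr->lock
 *
 * Returns:
 * true if the port is a virtual DP peer device, false otherwise
 */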
5462static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
5463{
5464 struct drm_dp_mst_port *downstream_port;
5465
5466 if (!port || port->dpcd_rev < DP_DPCD_REV_14)
5467 return false;
5468
5469
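	/* Virtual DP Sink (Internal Display Panel) */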
5470 if (port->port_num >= 8)
5471 return true;
5472
5473
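	/* DP-to-HDMI Protocol Converter */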
5474 if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV &&
5475 !port->mcs &&
5476 port->ldps)
5477 return true;
5478
5479
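	/* DP-to-DP: branch device with a single downstream SST sink */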
5480 mutex_lock(&port->mgr->lock);
5481 if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
5482 port->mstb &&
5483 port->mstb->num_ports == 2) {
5484 list_for_each_entry(downstream_port, &port->mstb->ports, next) {
5485 if (downstream_port->pdt == DP_PEER_DEVICE_SST_SINK &&
5486 !downstream_port->input) {
5487 mutex_unlock(&port->mgr->lock);
5488 return true;
5489 }
5490 }
5491 }
5492 mutex_unlock(&port->mgr->lock);
5493
5494 return false;
5495}
5496
5497
5498
5499
5500
5501
5502
5503
5504
5505
5506
5507
5508
5509
5510
5511
5512
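/**
 * drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
 * @port: The port to check. A leaf of the MST tree with an attached display.
 *
 * Depending on the situation, DSC may be enabled via the endpoint aux,
 * the immediately upstream aux, or the connector's physical aux.
 *
 * This is both the correct aux to read DSC_CAPABILITY and the
 * correct aux to write DSC_ENABLED.
 *
 * This operation can be expensive (several aux reads), so the caller should
 * cache the return value.
 *
 * Returns:
 * NULL if DSC cannot be enabled on this port, otherwise the aux device
 */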
5513struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
5514{
5515 struct drm_dp_mst_port *immediate_upstream_port;
5516 struct drm_dp_mst_port *fec_port;
5517 struct drm_dp_desc desc = {};
5518 u8 endpoint_fec;
5519 u8 endpoint_dsc;
5520
5521 if (!port)
5522 return NULL;
5523
5524 if (port->parent->port_parent)
5525 immediate_upstream_port = port->parent->port_parent;
5526 else
5527 immediate_upstream_port = NULL;
5528
5529 fec_port = immediate_upstream_port;
5530 while (fec_port) {
5531
5532
5533
5534
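		/*
		 * Each physical link (i.e. not a virtual port) between the
		 * output and the primary device must support FEC
		 */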
5535 if (!drm_dp_mst_is_virtual_dpcd(fec_port) &&
5536 !fec_port->fec_capable)
5537 return NULL;
5538
5539 fec_port = fec_port->parent->port_parent;
5540 }
5541
5542
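	/* DP-to-DP peer device */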
5543 if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
5544 u8 upstream_dsc;
5545
5546 if (drm_dp_dpcd_read(&port->aux,
5547 DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
5548 return NULL;
5549 if (drm_dp_dpcd_read(&port->aux,
5550 DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
5551 return NULL;
5552 if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
5553 DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
5554 return NULL;
5555
5556
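		/* Endpoint decompression with DP-to-DP peer device */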
5557 if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
5558 (endpoint_fec & DP_FEC_CAPABLE) &&
		    (upstream_dsc & 0x2) /* DSC passthrough */)
5560 return &port->aux;
5561
5562
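		/* Virtual DPCD decompression with DP-to-DP peer device */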
5563 return &immediate_upstream_port->aux;
5564 }
5565
5566
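	/* The endpoint itself exposes a virtual DPCD */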
5567 if (drm_dp_mst_is_virtual_dpcd(port))
5568 return &port->aux;
5569
5570
5571
5572
5573
5574
5575
5576
5577
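	/*
	 * Quirk for hubs (e.g. certain Synaptics hubs) that can decompress
	 * DSC without exposing a virtual DPCD. Applies when the physical aux
	 * reports DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD, the DPCD revision
	 * is 1.4 or higher, the port sits on the primary branch device, and
	 * the downstream port is not an analog (VGA) adapter; DSC is then
	 * controlled through the hub's physical aux.
	 */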
5578 if (drm_dp_read_desc(port->mgr->aux, &desc, true))
5579 return NULL;
5580
5581 if (drm_dp_has_quirk(&desc, 0,
5582 DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
5583 port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
5584 port->parent == port->mgr->mst_primary) {
5585 u8 downstreamport;
5586
5587 if (drm_dp_dpcd_read(&port->aux, DP_DOWNSTREAMPORT_PRESENT,
5588 &downstreamport, 1) < 0)
5589 return NULL;
5590
5591 if ((downstreamport & DP_DWN_STRM_PORT_PRESENT) &&
5592 ((downstreamport & DP_DWN_STRM_PORT_TYPE_MASK)
5593 != DP_DWN_STRM_PORT_TYPE_ANALOG))
5594 return port->mgr->aux;
5595 }
5596
5597
5598
5599
5600
5601
5602
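	/*
	 * The check below verifies if the MST sink connected to the GPU is
	 * capable of DSC - therefore the endpoint needs to be both DSC and
	 * FEC capable.
	 */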
5603 if (drm_dp_dpcd_read(&port->aux,
5604 DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
5605 return NULL;
5606 if (drm_dp_dpcd_read(&port->aux,
5607 DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
5608 return NULL;
5609 if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
5610 (endpoint_fec & DP_FEC_CAPABLE))
5611 return &port->aux;
5612
5613 return NULL;
5614}
5615EXPORT_SYMBOL(drm_dp_mst_dsc_aux_for_port);
5616