// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt XDomain discovery protocol support
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/utsname.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

#include "tb.h"

#define XDOMAIN_DEFAULT_TIMEOUT			5000
#define XDOMAIN_UUID_RETRIES			10
#define XDOMAIN_PROPERTIES_RETRIES		60
#define XDOMAIN_PROPERTIES_CHANGED_RETRIES	10
#define XDOMAIN_BONDING_WAIT			100
struct xdomain_request_work {
	struct work_struct work;
	struct tb_xdp_header *pkg;
	struct tb *tb;
};

static bool tb_xdomain_enabled = true;
module_param_named(xdomain, tb_xdomain_enabled, bool, 0444);
MODULE_PARM_DESC(xdomain, "allow XDomain protocol (default: true)");

/* Serializes access to the properties and protocol handlers below */
static DEFINE_MUTEX(xdomain_lock);

/* Properties exposed to the remote domains */
static struct tb_property_dir *xdomain_property_dir;
static u32 *xdomain_property_block;
static u32 xdomain_property_block_len;
static u32 xdomain_property_block_gen;

/* Additional protocol handlers */
static LIST_HEAD(protocol_handlers);

/* UUID of the XDomain discovery protocol: b638d70e-42ff-40bb-97c2-90e2c0b2ff07 */
static const uuid_t tb_xdp_uuid =
	UUID_INIT(0xb638d70e, 0x42ff, 0x40bb,
		  0x97, 0xc2, 0x90, 0xe2, 0xc0, 0xb2, 0xff, 0x07);

bool tb_is_xdomain_enabled(void)
{
	return tb_xdomain_enabled && tb_acpi_is_xdomain_allowed();
}

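/*
 * Matches a received control channel packet against an outstanding XDP
 * request: an error packet always completes the request, otherwise the
 * route (with the reply bit in the high dword masked off) and the
 * protocol UUID of the response must match the request.
 */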
static bool tb_xdomain_match(const struct tb_cfg_request *req,
			     const struct ctl_pkg *pkg)
{
	switch (pkg->frame.eof) {
	case TB_CFG_PKG_ERROR:
		return true;

	case TB_CFG_PKG_XDOMAIN_RESP: {
		const struct tb_xdp_header *res_hdr = pkg->buffer;
		const struct tb_xdp_header *req_hdr = req->request;

		if (pkg->frame.size < req->response_size / 4)
			return false;

		/* Make sure the route matches */
		if ((res_hdr->xd_hdr.route_hi & ~BIT(31)) !=
		     req_hdr->xd_hdr.route_hi)
			return false;
		if ((res_hdr->xd_hdr.route_lo) != req_hdr->xd_hdr.route_lo)
			return false;

		/* Check that the XDomain protocol matches */
		if (!uuid_equal(&res_hdr->uuid, &req_hdr->uuid))
			return false;

		return true;
	}

	default:
		return false;
	}
}

static bool tb_xdomain_copy(struct tb_cfg_request *req,
			    const struct ctl_pkg *pkg)
{
	memcpy(req->response, pkg->buffer, req->response_size);
	req->result.err = 0;
	return true;
}

static void response_ready(void *data)
{
	tb_cfg_request_put(data);
}

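/*
 * Sends a response packet on the control channel. No reply is expected;
 * the request reference is dropped from response_ready() once the
 * packet has been sent.
 */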
static int __tb_xdomain_response(struct tb_ctl *ctl, const void *response,
				 size_t size, enum tb_cfg_pkg_type type)
{
	struct tb_cfg_request *req;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = tb_xdomain_match;
	req->copy = tb_xdomain_copy;
	req->request = response;
	req->request_size = size;
	req->request_type = type;

	return tb_cfg_request(ctl, req, response_ready, req);
}

/**
 * tb_xdomain_response() - Send a XDomain response message
 * @xd: XDomain to send the message
 * @response: Response to send
 * @size: Size of the response
 * @type: PDF type of the response
 *
 * This can be used to send a XDomain response message to the other
 * domain. No response for the message is expected.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
			size_t size, enum tb_cfg_pkg_type type)
{
	return __tb_xdomain_response(xd->tb->ctl, response, size, type);
}
EXPORT_SYMBOL_GPL(tb_xdomain_response);

static int __tb_xdomain_request(struct tb_ctl *ctl, const void *request,
	size_t request_size, enum tb_cfg_pkg_type request_type, void *response,
	size_t response_size, enum tb_cfg_pkg_type response_type,
	unsigned int timeout_msec)
{
	struct tb_cfg_request *req;
	struct tb_cfg_result res;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = tb_xdomain_match;
	req->copy = tb_xdomain_copy;
	req->request = request;
	req->request_size = request_size;
	req->request_type = request_type;
	req->response = response;
	req->response_size = response_size;
	req->response_type = response_type;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	return res.err == 1 ? -EIO : res.err;
}

/**
 * tb_xdomain_request() - Send a XDomain request
 * @xd: XDomain to send the request
 * @request: Request to send
 * @request_size: Size of the request in bytes
 * @request_type: PDF type of the request
 * @response: Response is copied here
 * @response_size: Expected size of the response in bytes
 * @response_type: Expected PDF type of the response
 * @timeout_msec: Timeout in milliseconds to wait for the response
 *
 * This function can be used to send XDomain control channel messages to
 * the other domain. The function waits until the response is received
 * or when timeout triggers. Whichever comes first.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
		       size_t request_size, enum tb_cfg_pkg_type request_type,
		       void *response, size_t response_size,
		       enum tb_cfg_pkg_type response_type, unsigned int timeout_msec)
{
	return __tb_xdomain_request(xd->tb->ctl, request, request_size,
				    request_type, response, response_size,
				    response_type, timeout_msec);
}
EXPORT_SYMBOL_GPL(tb_xdomain_request);

static inline void tb_xdp_fill_header(struct tb_xdp_header *hdr, u64 route,
	u8 sequence, enum tb_xdp_type type, size_t size)
{
	u32 length_sn;

	length_sn = (size - sizeof(hdr->xd_hdr)) / 4;
	length_sn |= (sequence << TB_XDOMAIN_SN_SHIFT) & TB_XDOMAIN_SN_MASK;

	hdr->xd_hdr.route_hi = upper_32_bits(route);
	hdr->xd_hdr.route_lo = lower_32_bits(route);
	hdr->xd_hdr.length_sn = length_sn;
	hdr->type = type;
	memcpy(&hdr->uuid, &tb_xdp_uuid, sizeof(tb_xdp_uuid));
}

static int tb_xdp_handle_error(const struct tb_xdp_header *hdr)
{
	const struct tb_xdp_error_response *error;

	if (hdr->type != ERROR_RESPONSE)
		return 0;

	error = (const struct tb_xdp_error_response *)hdr;

	switch (error->error) {
	case ERROR_UNKNOWN_PACKET:
	case ERROR_UNKNOWN_DOMAIN:
		return -EIO;
	case ERROR_NOT_SUPPORTED:
		return -ENOTSUPP;
	case ERROR_NOT_READY:
		return -EAGAIN;
	default:
		break;
	}

	return 0;
}

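/*
 * Asks the remote host for its XDomain UUID. The sequence number cycles
 * through 0-3 as retries are made and the reply carries the UUID of the
 * remote domain.
 */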
static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry,
			       uuid_t *uuid)
{
	struct tb_xdp_uuid_response res;
	struct tb_xdp_uuid req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4, UUID_REQUEST,
			   sizeof(req));

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
				   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
				   TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	ret = tb_xdp_handle_error(&res.hdr);
	if (ret)
		return ret;

	uuid_copy(uuid, &res.src_uuid);
	return 0;
}

static int tb_xdp_uuid_response(struct tb_ctl *ctl, u64 route, u8 sequence,
				const uuid_t *uuid)
{
	struct tb_xdp_uuid_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence, UUID_RESPONSE,
			   sizeof(res));

	uuid_copy(&res.src_uuid, uuid);
	res.src_route_hi = upper_32_bits(route);
	res.src_route_lo = lower_32_bits(route);

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

static int tb_xdp_error_response(struct tb_ctl *ctl, u64 route, u8 sequence,
				 enum tb_xdp_error error)
{
	struct tb_xdp_error_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence, ERROR_RESPONSE,
			   sizeof(res));
	res.error = error;

	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

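/*
 * Reads the remote property block. The block is fetched in chunks of at
 * most TB_XDP_PROPERTIES_MAX_DATA_LENGTH dwords, advancing req.offset
 * until the whole block (data_length dwords) has been copied.
 */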
static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route,
	const uuid_t *src_uuid, const uuid_t *dst_uuid, int retry,
	u32 **block, u32 *generation)
{
	struct tb_xdp_properties_response *res;
	struct tb_xdp_properties req;
	u16 data_len, len;
	size_t total_size;
	u32 *data = NULL;
	int ret;

	total_size = sizeof(*res) + TB_XDP_PROPERTIES_MAX_DATA_LENGTH * 4;
	res = kzalloc(total_size, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4, PROPERTIES_REQUEST,
			   sizeof(req));
	memcpy(&req.src_uuid, src_uuid, sizeof(*src_uuid));
	memcpy(&req.dst_uuid, dst_uuid, sizeof(*dst_uuid));

	len = 0;
	data_len = 0;

	do {
		ret = __tb_xdomain_request(ctl, &req, sizeof(req),
					   TB_CFG_PKG_XDOMAIN_REQ, res,
					   total_size, TB_CFG_PKG_XDOMAIN_RESP,
					   XDOMAIN_DEFAULT_TIMEOUT);
		if (ret)
			goto err;

		ret = tb_xdp_handle_error(&res->hdr);
		if (ret)
			goto err;

		/*
		 * Package length includes the whole payload without the
		 * XDomain header. Validate first that the package is at
		 * least size of the response structure.
		 */
		len = res->hdr.xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
		if (len < sizeof(*res) / 4) {
			ret = -EINVAL;
			goto err;
		}

		len += sizeof(res->hdr.xd_hdr) / 4;
		len -= sizeof(*res) / 4;

		if (res->offset != req.offset) {
			ret = -EINVAL;
			goto err;
		}

		/*
		 * First time allocate block that has enough space for
		 * the whole properties block.
		 */
		if (!data) {
			data_len = res->data_length;
			if (data_len > TB_XDP_PROPERTIES_MAX_LENGTH) {
				ret = -E2BIG;
				goto err;
			}

			data = kcalloc(data_len, sizeof(u32), GFP_KERNEL);
			if (!data) {
				ret = -ENOMEM;
				goto err;
			}
		}

		memcpy(data + req.offset, res->data, len * 4);
		req.offset += len;
	} while (!data_len || req.offset < data_len);

	*block = data;
	*generation = res->generation;

	kfree(res);

	return data_len;

err:
	kfree(data);
	kfree(res);

	return ret;
}

static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl,
	u64 route, u8 sequence, const uuid_t *src_uuid,
	const struct tb_xdp_properties *req)
{
	struct tb_xdp_properties_response *res;
	size_t total_size;
	u16 len;
	int ret;

	/*
	 * We only publish properties for the local domain, so reply
	 * with an error if the request targets some other domain (the
	 * destination UUID does not match our UUID).
	 */
	if (!uuid_equal(src_uuid, &req->dst_uuid)) {
		tb_xdp_error_response(ctl, route, sequence,
				      ERROR_UNKNOWN_DOMAIN);
		return 0;
	}

	mutex_lock(&xdomain_lock);

	if (req->offset >= xdomain_property_block_len) {
		mutex_unlock(&xdomain_lock);
		return -EINVAL;
	}

	len = xdomain_property_block_len - req->offset;
	len = min_t(u16, len, TB_XDP_PROPERTIES_MAX_DATA_LENGTH);
	total_size = sizeof(*res) + len * 4;

	res = kzalloc(total_size, GFP_KERNEL);
	if (!res) {
		mutex_unlock(&xdomain_lock);
		return -ENOMEM;
	}

	tb_xdp_fill_header(&res->hdr, route, sequence, PROPERTIES_RESPONSE,
			   total_size);
	res->generation = xdomain_property_block_gen;
	res->data_length = xdomain_property_block_len;
	res->offset = req->offset;
	uuid_copy(&res->src_uuid, src_uuid);
	uuid_copy(&res->dst_uuid, &req->src_uuid);
	memcpy(res->data, &xdomain_property_block[req->offset], len * 4);

	mutex_unlock(&xdomain_lock);

	ret = __tb_xdomain_response(ctl, res, total_size,
				    TB_CFG_PKG_XDOMAIN_RESP);

	kfree(res);
	return ret;
}

static int tb_xdp_properties_changed_request(struct tb_ctl *ctl, u64 route,
					     int retry, const uuid_t *uuid)
{
	struct tb_xdp_properties_changed_response res;
	struct tb_xdp_properties_changed req;
	int ret;

	memset(&req, 0, sizeof(req));
	tb_xdp_fill_header(&req.hdr, route, retry % 4,
			   PROPERTIES_CHANGED_REQUEST, sizeof(req));
	uuid_copy(&req.src_uuid, uuid);

	memset(&res, 0, sizeof(res));
	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
				   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
				   TB_CFG_PKG_XDOMAIN_RESP,
				   XDOMAIN_DEFAULT_TIMEOUT);
	if (ret)
		return ret;

	return tb_xdp_handle_error(&res.hdr);
}

static int
tb_xdp_properties_changed_response(struct tb_ctl *ctl, u64 route, u8 sequence)
{
	struct tb_xdp_properties_changed_response res;

	memset(&res, 0, sizeof(res));
	tb_xdp_fill_header(&res.hdr, route, sequence,
			   PROPERTIES_CHANGED_RESPONSE, sizeof(res));
	return __tb_xdomain_response(ctl, &res, sizeof(res),
				     TB_CFG_PKG_XDOMAIN_RESP);
}

/**
 * tb_register_protocol_handler() - Register protocol handler
 * @handler: Handler to register
 *
 * This allows XDomain service drivers to hook into incoming XDomain
 * requests. For each supported protocol UUID there should be one
 * handler.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_register_protocol_handler(struct tb_protocol_handler *handler)
{
	if (!handler->uuid || !handler->callback)
		return -EINVAL;
	if (uuid_equal(handler->uuid, &tb_xdp_uuid))
		return -EINVAL;

	mutex_lock(&xdomain_lock);
	list_add_tail(&handler->list, &protocol_handlers);
	mutex_unlock(&xdomain_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(tb_register_protocol_handler);

/**
 * tb_unregister_protocol_handler() - Unregister protocol handler
 * @handler: Handler to unregister
 *
 * Removes the previously registered protocol handler.
 */
void tb_unregister_protocol_handler(struct tb_protocol_handler *handler)
{
	mutex_lock(&xdomain_lock);
	list_del_init(&handler->list);
	mutex_unlock(&xdomain_lock);
}
EXPORT_SYMBOL_GPL(tb_unregister_protocol_handler);

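/*
 * Re-encodes the property directory into the raw block that is sent to
 * remote hosts and bumps the generation. Callers hold xdomain_lock.
 */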
static int rebuild_property_block(void)
{
	u32 *block, len;
	int ret;

	ret = tb_property_format_dir(xdomain_property_dir, NULL, 0);
	if (ret < 0)
		return ret;

	len = ret;

	block = kcalloc(len, sizeof(u32), GFP_KERNEL);
	if (!block)
		return -ENOMEM;

	ret = tb_property_format_dir(xdomain_property_dir, block, len);
	if (ret) {
		kfree(block);
		return ret;
	}

	kfree(xdomain_property_block);
	xdomain_property_block = block;
	xdomain_property_block_len = len;
	xdomain_property_block_gen++;

	return 0;
}

static void finalize_property_block(void)
{
	const struct tb_property *nodename;

	/*
	 * On first XDomain connection we set up the system nodename.
	 * This is delayed here because userspace may not have it set
	 * when the driver is first probed.
	 */
	mutex_lock(&xdomain_lock);
	nodename = tb_property_find(xdomain_property_dir, "deviceid",
				    TB_PROPERTY_TYPE_TEXT);
	if (!nodename) {
		tb_property_add_text(xdomain_property_dir, "deviceid",
				     utsname()->nodename);
		rebuild_property_block();
	}
	mutex_unlock(&xdomain_lock);
}

static void tb_xdp_handle_request(struct work_struct *work)
{
	struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
	const struct tb_xdp_header *pkg = xw->pkg;
	const struct tb_xdomain_header *xhdr = &pkg->xd_hdr;
	struct tb *tb = xw->tb;
	struct tb_ctl *ctl = tb->ctl;
	const uuid_t *uuid;
	int ret = 0;
	u32 sequence;
	u64 route;

	route = ((u64)xhdr->route_hi << 32 | xhdr->route_lo) & ~BIT_ULL(63);
	sequence = xhdr->length_sn & TB_XDOMAIN_SN_MASK;
	sequence >>= TB_XDOMAIN_SN_SHIFT;

	mutex_lock(&tb->lock);
	if (tb->root_switch)
		uuid = tb->root_switch->uuid;
	else
		uuid = NULL;
	mutex_unlock(&tb->lock);

	if (!uuid) {
		tb_xdp_error_response(ctl, route, sequence, ERROR_NOT_READY);
		goto out;
	}

	finalize_property_block();

	switch (pkg->type) {
	case PROPERTIES_REQUEST:
		ret = tb_xdp_properties_response(tb, ctl, route, sequence, uuid,
			(const struct tb_xdp_properties *)pkg);
		break;

	case PROPERTIES_CHANGED_REQUEST: {
		struct tb_xdomain *xd;

		ret = tb_xdp_properties_changed_response(ctl, route, sequence);

		/*
		 * Since the properties have been changed, let's update
		 * the xdomain related to this connection as well in
		 * case there is a change in services it offers.
		 */
		xd = tb_xdomain_find_by_route_locked(tb, route);
		if (xd) {
			if (device_is_registered(&xd->dev)) {
				queue_delayed_work(tb->wq, &xd->get_properties_work,
						   msecs_to_jiffies(50));
			}
			tb_xdomain_put(xd);
		}

		break;
	}

	case UUID_REQUEST_OLD:
	case UUID_REQUEST:
		ret = tb_xdp_uuid_response(ctl, route, sequence, uuid);
		break;

	default:
		tb_xdp_error_response(ctl, route, sequence,
				      ERROR_NOT_SUPPORTED);
		break;
	}

	if (ret) {
		tb_warn(tb, "failed to send XDomain response for %#x\n",
			pkg->type);
	}

out:
	kfree(xw->pkg);
	kfree(xw);

	tb_domain_put(tb);
}

static bool
tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
			size_t size)
{
	struct xdomain_request_work *xw;

	xw = kmalloc(sizeof(*xw), GFP_KERNEL);
	if (!xw)
		return false;

	INIT_WORK(&xw->work, tb_xdp_handle_request);
	xw->pkg = kmemdup(hdr, size, GFP_KERNEL);
	if (!xw->pkg) {
		kfree(xw);
		return false;
	}
	xw->tb = tb_domain_get(tb);

	schedule_work(&xw->work);
	return true;
}

/**
 * tb_register_service_driver() - Register XDomain service driver
 * @drv: Driver to register
 *
 * Registers new service driver from @drv to the bus.
 */
int tb_register_service_driver(struct tb_service_driver *drv)
{
	drv->driver.bus = &tb_bus_type;
	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_register_service_driver);

/**
 * tb_unregister_service_driver() - Unregister XDomain service driver
 * @drv: Driver to unregister
 *
 * Unregisters XDomain service driver from the bus.
 */
void tb_unregister_service_driver(struct tb_service_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_unregister_service_driver);

static ssize_t key_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	/*
	 * It should be null terminated but anything else is pretty much
	 * allowed.
	 */
	return sprintf(buf, "%*pE\n", (int)strlen(svc->key), svc->key);
}
static DEVICE_ATTR_RO(key);

static int get_modalias(struct tb_service *svc, char *buf, size_t size)
{
	return snprintf(buf, size, "tbsvc:k%sp%08Xv%08Xr%08X", svc->key,
			svc->prtcid, svc->prtcvers, svc->prtcrevs);
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	/* Full buffer size except new line and null termination */
	get_modalias(svc, buf, PAGE_SIZE - 2);
	return sprintf(buf, "%s\n", buf);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t prtcid_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "%u\n", svc->prtcid);
}
static DEVICE_ATTR_RO(prtcid);

static ssize_t prtcvers_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "%u\n", svc->prtcvers);
}
static DEVICE_ATTR_RO(prtcvers);

static ssize_t prtcrevs_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "%u\n", svc->prtcrevs);
}
static DEVICE_ATTR_RO(prtcrevs);

static ssize_t prtcstns_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);

	return sprintf(buf, "0x%08x\n", svc->prtcstns);
}
static DEVICE_ATTR_RO(prtcstns);

static struct attribute *tb_service_attrs[] = {
	&dev_attr_key.attr,
	&dev_attr_modalias.attr,
	&dev_attr_prtcid.attr,
	&dev_attr_prtcvers.attr,
	&dev_attr_prtcrevs.attr,
	&dev_attr_prtcstns.attr,
	NULL,
};

static const struct attribute_group tb_service_attr_group = {
	.attrs = tb_service_attrs,
};

static const struct attribute_group *tb_service_attr_groups[] = {
	&tb_service_attr_group,
	NULL,
};

static int tb_service_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);
	char modalias[64];

	get_modalias(svc, modalias, sizeof(modalias));
	return add_uevent_var(env, "MODALIAS=%s", modalias);
}

static void tb_service_release(struct device *dev)
{
	struct tb_service *svc = container_of(dev, struct tb_service, dev);
	struct tb_xdomain *xd = tb_service_parent(svc);

	tb_service_debugfs_remove(svc);
	ida_simple_remove(&xd->service_ids, svc->id);
	kfree(svc->key);
	kfree(svc);
}

struct device_type tb_service_type = {
	.name = "thunderbolt_service",
	.groups = tb_service_attr_groups,
	.uevent = tb_service_uevent,
	.release = tb_service_release,
};
EXPORT_SYMBOL_GPL(tb_service_type);

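/*
 * device_for_each_child() callback: unregisters a service whose
 * property directory is no longer present in the updated XDomain
 * property block.
 */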
static int remove_missing_service(struct device *dev, void *data)
{
	struct tb_xdomain *xd = data;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return 0;

	if (!tb_property_find(xd->properties, svc->key,
			      TB_PROPERTY_TYPE_DIRECTORY))
		device_unregister(dev);

	return 0;
}

static int find_service(struct device *dev, void *data)
{
	const struct tb_property *p = data;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return 0;

	return !strcmp(svc->key, p->key);
}

static int populate_service(struct tb_service *svc,
			    struct tb_property *property)
{
	struct tb_property_dir *dir = property->value.dir;
	struct tb_property *p;

	/* Fill in standard properties */
	p = tb_property_find(dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcid = p->value.immediate;
	p = tb_property_find(dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcvers = p->value.immediate;
	p = tb_property_find(dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcrevs = p->value.immediate;
	p = tb_property_find(dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
	if (p)
		svc->prtcstns = p->value.immediate;

	svc->key = kstrdup(property->key, GFP_KERNEL);
	if (!svc->key)
		return -ENOMEM;

	return 0;
}

static void enumerate_services(struct tb_xdomain *xd)
{
	struct tb_service *svc;
	struct tb_property *p;
	struct device *dev;
	int id;

	/*
	 * First remove all services that are not available anymore in
	 * the updated property block.
	 */
	device_for_each_child_reverse(&xd->dev, xd, remove_missing_service);

	/* Then re-enumerate properties creating new services as we go */
	tb_property_for_each(xd->properties, p) {
		if (p->type != TB_PROPERTY_TYPE_DIRECTORY)
			continue;

		/* If the service exists already we are fine */
		dev = device_find_child(&xd->dev, p, find_service);
		if (dev) {
			put_device(dev);
			continue;
		}

		svc = kzalloc(sizeof(*svc), GFP_KERNEL);
		if (!svc)
			break;

		if (populate_service(svc, p)) {
			kfree(svc);
			break;
		}

		id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
		if (id < 0) {
			kfree(svc->key);
			kfree(svc);
			break;
		}
		svc->id = id;
		svc->dev.bus = &tb_bus_type;
		svc->dev.type = &tb_service_type;
		svc->dev.parent = &xd->dev;
		dev_set_name(&svc->dev, "%s.%d", dev_name(&xd->dev), svc->id);

		tb_service_debugfs_init(svc);

		if (device_register(&svc->dev)) {
			put_device(&svc->dev);
			break;
		}
	}
}

static int populate_properties(struct tb_xdomain *xd,
			       struct tb_property_dir *dir)
{
	const struct tb_property *p;

	/* Mandatory properties */
	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
	if (!p)
		return -EINVAL;
	xd->device = p->value.immediate;

	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
	if (!p)
		return -EINVAL;
	xd->vendor = p->value.immediate;

	kfree(xd->device_name);
	xd->device_name = NULL;
	kfree(xd->vendor_name);
	xd->vendor_name = NULL;

	/* Optional properties */
	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
	if (p)
		xd->device_name = kstrdup(p->value.text, GFP_KERNEL);
	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
	if (p)
		xd->vendor_name = kstrdup(p->value.text, GFP_KERNEL);

	return 0;
}

/* Called with @xd->lock held */
static void tb_xdomain_restore_paths(struct tb_xdomain *xd)
{
	if (!xd->resume)
		return;

	xd->resume = false;
	if (xd->transmit_path) {
		dev_dbg(&xd->dev, "re-establishing DMA path\n");
		tb_domain_approve_xdomain_paths(xd->tb, xd);
	}
}

static inline struct tb_switch *tb_xdomain_parent(struct tb_xdomain *xd)
{
	return tb_to_switch(xd->dev.parent);
}

static int tb_xdomain_update_link_attributes(struct tb_xdomain *xd)
{
	bool change = false;
	struct tb_port *port;
	int ret;

	port = tb_port_at(xd->route, tb_xdomain_parent(xd));

	ret = tb_port_get_link_speed(port);
	if (ret < 0)
		return ret;

	if (xd->link_speed != ret)
		change = true;

	xd->link_speed = ret;

	ret = tb_port_get_link_width(port);
	if (ret < 0)
		return ret;

	if (xd->link_width != ret)
		change = true;

	xd->link_width = ret;

	if (change)
		kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);

	return 0;
}

static void tb_xdomain_get_uuid(struct work_struct *work)
{
	struct tb_xdomain *xd = container_of(work, typeof(*xd),
					     get_uuid_work.work);
	struct tb *tb = xd->tb;
	uuid_t uuid;
	int ret;

	ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->uuid_retries, &uuid);
	if (ret < 0) {
		if (xd->uuid_retries-- > 0) {
			queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
					   msecs_to_jiffies(100));
		} else {
			dev_dbg(&xd->dev, "failed to read remote UUID\n");
		}
		return;
	}

	if (uuid_equal(&uuid, xd->local_uuid))
		dev_dbg(&xd->dev, "intra-domain loop detected\n");

	/*
	 * If the UUID is different, there is another host using this
	 * link now (or the host was replaced while we were suspended),
	 * so mark the XDomain as unplugged so that it gets cleaned up.
	 */
	if (xd->remote_uuid && !uuid_equal(&uuid, xd->remote_uuid)) {
		dev_dbg(&xd->dev, "remote UUID is different, unplugging\n");
		xd->is_unplugged = true;
		return;
	}

	/* First time fill in the missing UUID */
	if (!xd->remote_uuid) {
		xd->remote_uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
		if (!xd->remote_uuid)
			return;
	}

	/* Start exchanging properties with the other host */
	queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
			   msecs_to_jiffies(100));
	queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
			   msecs_to_jiffies(1000));
}

static void tb_xdomain_get_properties(struct work_struct *work)
{
	struct tb_xdomain *xd = container_of(work, typeof(*xd),
					     get_properties_work.work);
	struct tb_property_dir *dir;
	struct tb *tb = xd->tb;
	bool update = false;
	u32 *block = NULL;
	u32 gen = 0;
	int ret;

	ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid,
					xd->remote_uuid, xd->properties_retries,
					&block, &gen);
	if (ret < 0) {
		if (xd->properties_retries-- > 0) {
			queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
					   msecs_to_jiffies(1000));
		} else {
			/* Give up now */
			dev_err(&xd->dev,
				"failed to read XDomain properties from %pUb\n",
				xd->remote_uuid);
		}
		return;
	}

	xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;

	mutex_lock(&xd->lock);

	/* Only accept newer generation properties */
	if (xd->properties && gen <= xd->property_block_gen) {
		/*
		 * On resume it is likely that the properties block is
		 * not changed (unless the other end added or removed
		 * services). However, we need to make sure the existing
		 * DMA paths are restored properly.
		 */
		tb_xdomain_restore_paths(xd);
		goto err_free_block;
	}

	dir = tb_property_parse_dir(block, ret);
	if (!dir) {
		dev_err(&xd->dev, "failed to parse XDomain properties\n");
		goto err_free_block;
	}

	ret = populate_properties(xd, dir);
	if (ret) {
		dev_err(&xd->dev, "missing XDomain properties in response\n");
		goto err_free_dir;
	}

	/* Release the existing one */
	if (xd->properties) {
		tb_property_free_dir(xd->properties);
		update = true;
	}

	xd->properties = dir;
	xd->property_block_gen = gen;

	tb_xdomain_update_link_attributes(xd);

	tb_xdomain_restore_paths(xd);

	mutex_unlock(&xd->lock);

	kfree(block);

	/*
	 * Now the device should be ready enough so we can add it to the
	 * bus and let userspace know about it. If the device is already
	 * registered, we notify userspace that its properties changed.
	 */
	if (!update) {
		if (device_add(&xd->dev)) {
			dev_err(&xd->dev, "failed to add XDomain device\n");
			return;
		}
	} else {
		kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
	}

	enumerate_services(xd);
	return;

err_free_dir:
	tb_property_free_dir(dir);
err_free_block:
	kfree(block);
	mutex_unlock(&xd->lock);
}

static void tb_xdomain_properties_changed(struct work_struct *work)
{
	struct tb_xdomain *xd = container_of(work, typeof(*xd),
					     properties_changed_work.work);
	int ret;

	ret = tb_xdp_properties_changed_request(xd->tb->ctl, xd->route,
				xd->properties_changed_retries, xd->local_uuid);
	if (ret) {
		if (xd->properties_changed_retries-- > 0)
			queue_delayed_work(xd->tb->wq,
					   &xd->properties_changed_work,
					   msecs_to_jiffies(1000));
		return;
	}

	xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
}

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sprintf(buf, "%#x\n", xd->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
	int ret;

	if (mutex_lock_interruptible(&xd->lock))
		return -ERESTARTSYS;
	ret = sprintf(buf, "%s\n", xd->device_name ? xd->device_name : "");
	mutex_unlock(&xd->lock);

	return ret;
}
static DEVICE_ATTR_RO(device_name);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sprintf(buf, "%#x\n", xd->vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
	int ret;

	if (mutex_lock_interruptible(&xd->lock))
		return -ERESTARTSYS;
	ret = sprintf(buf, "%s\n", xd->vendor_name ? xd->vendor_name : "");
	mutex_unlock(&xd->lock);

	return ret;
}
static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sprintf(buf, "%pUb\n", xd->remote_uuid);
}
static DEVICE_ATTR_RO(unique_id);

static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sprintf(buf, "%u.0 Gb/s\n", xd->link_speed);
}

static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);

static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	return sprintf(buf, "%u\n", xd->link_width);
}

static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);

static struct attribute *xdomain_attrs[] = {
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_rx_lanes.attr,
	&dev_attr_rx_speed.attr,
	&dev_attr_tx_lanes.attr,
	&dev_attr_tx_speed.attr,
	&dev_attr_unique_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	NULL,
};

static const struct attribute_group xdomain_attr_group = {
	.attrs = xdomain_attrs,
};

static const struct attribute_group *xdomain_attr_groups[] = {
	&xdomain_attr_group,
	NULL,
};

static void tb_xdomain_release(struct device *dev)
{
	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

	put_device(xd->dev.parent);

	tb_property_free_dir(xd->properties);
	ida_destroy(&xd->service_ids);

	kfree(xd->local_uuid);
	kfree(xd->remote_uuid);
	kfree(xd->device_name);
	kfree(xd->vendor_name);
	kfree(xd);
}

static void start_handshake(struct tb_xdomain *xd)
{
	xd->uuid_retries = XDOMAIN_UUID_RETRIES;
	xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
	xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;

	if (xd->needs_uuid) {
		queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
				   msecs_to_jiffies(100));
	} else {
		/* Start exchanging properties with the other host */
		queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
				   msecs_to_jiffies(100));
		queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
				   msecs_to_jiffies(1000));
	}
}

static void stop_handshake(struct tb_xdomain *xd)
{
	xd->uuid_retries = 0;
	xd->properties_retries = 0;
	xd->properties_changed_retries = 0;

	cancel_delayed_work_sync(&xd->get_uuid_work);
	cancel_delayed_work_sync(&xd->get_properties_work);
	cancel_delayed_work_sync(&xd->properties_changed_work);
}

static int __maybe_unused tb_xdomain_suspend(struct device *dev)
{
	stop_handshake(tb_to_xdomain(dev));
	return 0;
}

static int __maybe_unused tb_xdomain_resume(struct device *dev)
{
	struct tb_xdomain *xd = tb_to_xdomain(dev);

	/*
	 * Ask tb_xdomain_get_properties() to restore any existing DMA
	 * paths after the properties have been re-read.
	 */
	xd->resume = true;
	start_handshake(xd);

	return 0;
}

static const struct dev_pm_ops tb_xdomain_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tb_xdomain_suspend, tb_xdomain_resume)
};

struct device_type tb_xdomain_type = {
	.name = "thunderbolt_xdomain",
	.release = tb_xdomain_release,
	.pm = &tb_xdomain_pm_ops,
};
EXPORT_SYMBOL_GPL(tb_xdomain_type);

/**
 * tb_xdomain_alloc() - Allocate new XDomain object
 * @tb: Domain where the XDomain belongs
 * @parent: Parent device (the switch through which the connection to
 *	    the other domain is reached)
 * @route: Route string used to reach the other domain
 * @local_uuid: Our local domain UUID
 * @remote_uuid: UUID of the other domain (optional)
 *
 * Allocates new XDomain structure and returns pointer to that. The
 * object must be released by calling tb_xdomain_put().
 */
struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
				    u64 route, const uuid_t *local_uuid,
				    const uuid_t *remote_uuid)
{
	struct tb_switch *parent_sw = tb_to_switch(parent);
	struct tb_xdomain *xd;
	struct tb_port *down;

	/* Make sure the downstream domain is accessible */
	down = tb_port_at(route, parent_sw);
	tb_port_unlock(down);

	xd = kzalloc(sizeof(*xd), GFP_KERNEL);
	if (!xd)
		return NULL;

	xd->tb = tb;
	xd->route = route;
	ida_init(&xd->service_ids);
	mutex_init(&xd->lock);
	INIT_DELAYED_WORK(&xd->get_uuid_work, tb_xdomain_get_uuid);
	INIT_DELAYED_WORK(&xd->get_properties_work, tb_xdomain_get_properties);
	INIT_DELAYED_WORK(&xd->properties_changed_work,
			  tb_xdomain_properties_changed);

	xd->local_uuid = kmemdup(local_uuid, sizeof(uuid_t), GFP_KERNEL);
	if (!xd->local_uuid)
		goto err_free;

	if (remote_uuid) {
		xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t),
					  GFP_KERNEL);
		if (!xd->remote_uuid)
			goto err_free_local_uuid;
	} else {
		xd->needs_uuid = true;
	}

	device_initialize(&xd->dev);
	xd->dev.parent = get_device(parent);
	xd->dev.bus = &tb_bus_type;
	xd->dev.type = &tb_xdomain_type;
	xd->dev.groups = xdomain_attr_groups;
	dev_set_name(&xd->dev, "%u-%llx", tb->index, route);

	/*
	 * This keeps the DMA powered on as long as we have active
	 * connection to another host.
	 */
	pm_runtime_set_active(&xd->dev);
	pm_runtime_get_noresume(&xd->dev);
	pm_runtime_enable(&xd->dev);

	return xd;

err_free_local_uuid:
	kfree(xd->local_uuid);
err_free:
	kfree(xd);

	return NULL;
}

/**
 * tb_xdomain_add() - Add XDomain to the bus
 * @xd: XDomain to add
 *
 * This function starts XDomain discovery protocol handshake and
 * eventually adds the XDomain to the bus. After calling this function
 * the caller needs to call tb_xdomain_remove() in order to remove and
 * release the object regardless of whether the handshake succeeded or
 * not.
 */
void tb_xdomain_add(struct tb_xdomain *xd)
{
	/* Start exchanging properties with the other host */
	start_handshake(xd);
}

static int unregister_service(struct device *dev, void *data)
{
	device_unregister(dev);
	return 0;
}

/**
 * tb_xdomain_remove() - Remove XDomain from the bus
 * @xd: XDomain to remove
 *
 * This will stop all ongoing configuration work and remove the XDomain
 * from the bus. When the last reference to @xd is released the object
 * will be released as well.
 */
void tb_xdomain_remove(struct tb_xdomain *xd)
{
	stop_handshake(xd);

	device_for_each_child_reverse(&xd->dev, xd, unregister_service);

	/*
	 * Undo runtime PM here explicitly because it is possible that
	 * the XDomain was never added to the bus and thus device_del()
	 * is not called for it (device_del() would handle this
	 * otherwise).
	 */
	pm_runtime_disable(&xd->dev);
	pm_runtime_put_noidle(&xd->dev);
	pm_runtime_set_suspended(&xd->dev);

	if (!device_is_registered(&xd->dev))
		put_device(&xd->dev);
	else
		device_unregister(&xd->dev);
}

/**
 * tb_xdomain_lane_bonding_enable() - Enable lane bonding on XDomain
 * @xd: XDomain connection
 *
 * Lane bonding is disabled by default for XDomains. This function tries
 * to enable bonding by first enabling the port and waiting for the CL0
 * state.
 *
 * Return: %0 in case of success and negative errno in case of error.
 */
int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd)
{
	struct tb_port *port;
	int ret;

	port = tb_port_at(xd->route, tb_xdomain_parent(xd));
	if (!port->dual_link_port)
		return -ENODEV;

	ret = tb_port_enable(port->dual_link_port);
	if (ret)
		return ret;

	ret = tb_wait_for_port(port->dual_link_port, true);
	if (ret < 0)
		return ret;
	if (!ret)
		return -ENOTCONN;

	ret = tb_port_lane_bonding_enable(port);
	if (ret) {
		tb_port_warn(port, "failed to enable lane bonding\n");
		return ret;
	}

	tb_xdomain_update_link_attributes(xd);

	dev_dbg(&xd->dev, "lane bonding enabled\n");
	return 0;
}
EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_enable);

/**
 * tb_xdomain_lane_bonding_disable() - Disable lane bonding
 * @xd: XDomain connection
 *
 * Lane bonding is disabled by default for XDomains. If bonding has been
 * enabled, this function can be used to disable it.
 */
void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd)
{
	struct tb_port *port;

	port = tb_port_at(xd->route, tb_xdomain_parent(xd));
	if (port->dual_link_port) {
		tb_port_lane_bonding_disable(port);
		tb_port_disable(port->dual_link_port);
		tb_xdomain_update_link_attributes(xd);

		dev_dbg(&xd->dev, "lane bonding disabled\n");
	}
}
EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_disable);

/**
 * tb_xdomain_enable_paths() - Enable DMA paths for XDomain connection
 * @xd: XDomain connection
 * @transmit_path: HopID of the transmit path the other end is using to
 *		   send packets
 * @transmit_ring: DMA ring used to receive packets from the other end
 * @receive_path: HopID of the receive path the other end is using to
 *		  receive packets
 * @receive_ring: DMA ring used to send packets to the other end
 *
 * The function enables DMA paths accordingly so that after successful
 * return the caller can send and receive packets using high-speed DMA
 * path.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path,
			    u16 transmit_ring, u16 receive_path,
			    u16 receive_ring)
{
	int ret;

	mutex_lock(&xd->lock);

	if (xd->transmit_path) {
		ret = xd->transmit_path == transmit_path ? 0 : -EBUSY;
		goto exit_unlock;
	}

	xd->transmit_path = transmit_path;
	xd->transmit_ring = transmit_ring;
	xd->receive_path = receive_path;
	xd->receive_ring = receive_ring;

	ret = tb_domain_approve_xdomain_paths(xd->tb, xd);

exit_unlock:
	mutex_unlock(&xd->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tb_xdomain_enable_paths);

/**
 * tb_xdomain_disable_paths() - Disable DMA paths for XDomain connection
 * @xd: XDomain connection
 *
 * This does the opposite of tb_xdomain_enable_paths(): disables the DMA
 * paths to the other domain.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_xdomain_disable_paths(struct tb_xdomain *xd)
{
	int ret = 0;

	mutex_lock(&xd->lock);
	if (xd->transmit_path) {
		xd->transmit_path = 0;
		xd->transmit_ring = 0;
		xd->receive_path = 0;
		xd->receive_ring = 0;

		ret = tb_domain_disconnect_xdomain_paths(xd->tb, xd);
	}
	mutex_unlock(&xd->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tb_xdomain_disable_paths);

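/*
 * Lookup key used when walking the topology below a domain. The caller
 * fills in either the remote UUID, the link/depth pair, or the route
 * string and leaves the other fields zeroed.
 */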
struct tb_xdomain_lookup {
	const uuid_t *uuid;
	u8 link;
	u8 depth;
	u64 route;
};

static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
	const struct tb_xdomain_lookup *lookup)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_xdomain *xd;

		if (port->xdomain) {
			xd = port->xdomain;

			if (lookup->uuid) {
				if (xd->remote_uuid &&
				    uuid_equal(xd->remote_uuid, lookup->uuid))
					return xd;
			} else if (lookup->link &&
				   lookup->link == xd->link &&
				   lookup->depth == xd->depth) {
				return xd;
			} else if (lookup->route &&
				   lookup->route == xd->route) {
				return xd;
			}
		} else if (tb_port_has_remote(port)) {
			xd = switch_find_xdomain(port->remote->sw, lookup);
			if (xd)
				return xd;
		}
	}

	return NULL;
}

/**
 * tb_xdomain_find_by_uuid() - Find an XDomain by UUID
 * @tb: Domain where the XDomain belongs to
 * @uuid: UUID to look for
 *
 * Finds XDomain by walking through the Thunderbolt topology below @tb.
 * The returned XDomain will have its reference count increased so the
 * caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
	struct tb_xdomain_lookup lookup;
	struct tb_xdomain *xd;

	memset(&lookup, 0, sizeof(lookup));
	lookup.uuid = uuid;

	xd = switch_find_xdomain(tb->root_switch, &lookup);
	return tb_xdomain_get(xd);
}
EXPORT_SYMBOL_GPL(tb_xdomain_find_by_uuid);

/**
 * tb_xdomain_find_by_link_depth() - Find an XDomain by link and depth
 * @tb: Domain where the XDomain belongs to
 * @link: Root switch link number
 * @depth: Depth in the link
 *
 * Finds XDomain by walking through the Thunderbolt topology below @tb.
 * The returned XDomain will have its reference count increased so the
 * caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
						 u8 depth)
{
	struct tb_xdomain_lookup lookup;
	struct tb_xdomain *xd;

	memset(&lookup, 0, sizeof(lookup));
	lookup.link = link;
	lookup.depth = depth;

	xd = switch_find_xdomain(tb->root_switch, &lookup);
	return tb_xdomain_get(xd);
}

/**
 * tb_xdomain_find_by_route() - Find an XDomain by route string
 * @tb: Domain where the XDomain belongs to
 * @route: XDomain route string
 *
 * Finds XDomain by walking through the Thunderbolt topology below @tb.
 * The returned XDomain will have its reference count increased so the
 * caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route)
{
	struct tb_xdomain_lookup lookup;
	struct tb_xdomain *xd;

	memset(&lookup, 0, sizeof(lookup));
	lookup.route = route;

	xd = switch_find_xdomain(tb->root_switch, &lookup);
	return tb_xdomain_get(xd);
}
EXPORT_SYMBOL_GPL(tb_xdomain_find_by_route);

bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
			       const void *buf, size_t size)
{
	const struct tb_protocol_handler *handler, *tmp;
	const struct tb_xdp_header *hdr = buf;
	unsigned int length;
	int ret = 0;

	/* We expect the packet is at least size of the header */
	length = hdr->xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
	if (length != size / 4 - sizeof(hdr->xd_hdr) / 4)
		return true;
	if (length < sizeof(*hdr) / 4 - sizeof(hdr->xd_hdr) / 4)
		return true;

	/*
	 * Handle XDomain discovery protocol packets directly here. For
	 * other protocols (based on their UUID) we call registered
	 * handlers in turn.
	 */
	if (uuid_equal(&hdr->uuid, &tb_xdp_uuid)) {
		if (type == TB_CFG_PKG_XDOMAIN_REQ)
			return tb_xdp_schedule_request(tb, hdr, size);
		return false;
	}

	mutex_lock(&xdomain_lock);
	list_for_each_entry_safe(handler, tmp, &protocol_handlers, list) {
		if (!uuid_equal(&hdr->uuid, handler->uuid))
			continue;

		mutex_unlock(&xdomain_lock);
		ret = handler->callback(buf, size, handler->data);
		mutex_lock(&xdomain_lock);

		if (ret)
			break;
	}
	mutex_unlock(&xdomain_lock);

	return ret > 0;
}

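/*
 * Runs for every device on the Thunderbolt bus; if the device is an
 * XDomain, notify the remote host that our exported properties have
 * changed so it re-reads them.
 */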
static int update_xdomain(struct device *dev, void *data)
{
	struct tb_xdomain *xd;

	xd = tb_to_xdomain(dev);
	if (xd) {
		queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
				   msecs_to_jiffies(50));
	}

	return 0;
}

static void update_all_xdomains(void)
{
	bus_for_each_dev(&tb_bus_type, NULL, NULL, update_xdomain);
}

static bool remove_directory(const char *key, const struct tb_property_dir *dir)
{
	struct tb_property *p;

	p = tb_property_find(xdomain_property_dir, key,
			     TB_PROPERTY_TYPE_DIRECTORY);
	if (p && p->value.dir == dir) {
		tb_property_remove(p);
		return true;
	}
	return false;
}

/**
 * tb_register_property_dir() - Register property directory to the host
 * @key: Key (name) of the directory to add
 * @dir: Directory to add
 *
 * Service drivers can use this function to add a new property directory
 * to the host, making it available to the remote domains. It is
 * rejected if there is already a property directory registered with the
 * same key. Connected hosts are notified so that they re-read the
 * properties.
 *
 * Return: %0 on success and negative errno on failure
 */
int tb_register_property_dir(const char *key, struct tb_property_dir *dir)
{
	int ret;

	if (WARN_ON(!xdomain_property_dir))
		return -EAGAIN;

	if (!key || strlen(key) > 8)
		return -EINVAL;

	mutex_lock(&xdomain_lock);
	if (tb_property_find(xdomain_property_dir, key,
			     TB_PROPERTY_TYPE_DIRECTORY)) {
		ret = -EEXIST;
		goto err_unlock;
	}

	ret = tb_property_add_dir(xdomain_property_dir, key, dir);
	if (ret)
		goto err_unlock;

	ret = rebuild_property_block();
	if (ret) {
		remove_directory(key, dir);
		goto err_unlock;
	}

	mutex_unlock(&xdomain_lock);
	update_all_xdomains();
	return 0;

err_unlock:
	mutex_unlock(&xdomain_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(tb_register_property_dir);

/**
 * tb_unregister_property_dir() - Removes property directory from host
 * @key: Key (name) of the directory
 * @dir: Directory to remove
 *
 * This will remove the existing directory from this host and notify the
 * connected hosts about the change.
 */
void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir)
{
	int ret = 0;

	mutex_lock(&xdomain_lock);
	if (remove_directory(key, dir))
		ret = rebuild_property_block();
	mutex_unlock(&xdomain_lock);

	if (!ret)
		update_all_xdomains();
}
EXPORT_SYMBOL_GPL(tb_unregister_property_dir);

int tb_xdomain_init(void)
{
	xdomain_property_dir = tb_property_create_dir(NULL);
	if (!xdomain_property_dir)
		return -ENOMEM;

	/*
	 * Initialize standard set of properties without any service
	 * directories. Those will be added by service drivers
	 * themselves when they are loaded.
	 *
	 * The node name is filled in later on the first XDomain
	 * connection (see finalize_property_block()).
	 */
	tb_property_add_immediate(xdomain_property_dir, "vendorid",
				  PCI_VENDOR_ID_INTEL);
	tb_property_add_text(xdomain_property_dir, "vendorid", "Intel Corp.");
	tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
	tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);

	return 0;
}

void tb_xdomain_exit(void)
{
	kfree(xdomain_property_block);
	tb_property_free_dir(xdomain_property_dir);
}