linux/drivers/thunderbolt/xdomain.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt XDomain discovery protocol support
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/device.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/utsname.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

#include "tb.h"

#define XDOMAIN_DEFAULT_TIMEOUT                 5000 /* ms */
#define XDOMAIN_UUID_RETRIES                    10
#define XDOMAIN_PROPERTIES_RETRIES              60
#define XDOMAIN_PROPERTIES_CHANGED_RETRIES      10

struct xdomain_request_work {
        struct work_struct work;
        struct tb_xdp_header *pkg;
        struct tb *tb;
};

/* Serializes access to the properties and protocol handlers below */
static DEFINE_MUTEX(xdomain_lock);

/* Properties exposed to the remote domains */
static struct tb_property_dir *xdomain_property_dir;
static u32 *xdomain_property_block;
static u32 xdomain_property_block_len;
static u32 xdomain_property_block_gen;

/* Additional protocol handlers */
static LIST_HEAD(protocol_handlers);

/* UUID for XDomain discovery protocol: b638d70e-42ff-40bb-97c2-90e2c0b2ff07 */
static const uuid_t tb_xdp_uuid =
        UUID_INIT(0xb638d70e, 0x42ff, 0x40bb,
                  0x97, 0xc2, 0x90, 0xe2, 0xc0, 0xb2, 0xff, 0x07);

static bool tb_xdomain_match(const struct tb_cfg_request *req,
                             const struct ctl_pkg *pkg)
{
        switch (pkg->frame.eof) {
        case TB_CFG_PKG_ERROR:
                return true;

        case TB_CFG_PKG_XDOMAIN_RESP: {
                const struct tb_xdp_header *res_hdr = pkg->buffer;
                const struct tb_xdp_header *req_hdr = req->request;

                if (pkg->frame.size < req->response_size / 4)
                        return false;

                /* Make sure route matches */
                if ((res_hdr->xd_hdr.route_hi & ~BIT(31)) !=
                     req_hdr->xd_hdr.route_hi)
                        return false;
                if ((res_hdr->xd_hdr.route_lo) != req_hdr->xd_hdr.route_lo)
                        return false;

                /* Check that the XDomain protocol matches */
                if (!uuid_equal(&res_hdr->uuid, &req_hdr->uuid))
                        return false;

                return true;
        }

        default:
                return false;
        }
}

static bool tb_xdomain_copy(struct tb_cfg_request *req,
                            const struct ctl_pkg *pkg)
{
        memcpy(req->response, pkg->buffer, req->response_size);
        req->result.err = 0;
        return true;
}

static void response_ready(void *data)
{
        tb_cfg_request_put(data);
}

static int __tb_xdomain_response(struct tb_ctl *ctl, const void *response,
                                 size_t size, enum tb_cfg_pkg_type type)
{
        struct tb_cfg_request *req;

        req = tb_cfg_request_alloc();
        if (!req)
                return -ENOMEM;

        req->match = tb_xdomain_match;
        req->copy = tb_xdomain_copy;
        req->request = response;
        req->request_size = size;
        req->request_type = type;

        return tb_cfg_request(ctl, req, response_ready, req);
}

/**
 * tb_xdomain_response() - Send an XDomain response message
 * @xd: XDomain to send the message
 * @response: Response to send
 * @size: Size of the response
 * @type: PDF type of the response
 *
 * This can be used to send an XDomain response message to the other
 * domain. No response for the message is expected.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
                        size_t size, enum tb_cfg_pkg_type type)
{
        return __tb_xdomain_response(xd->tb->ctl, response, size, type);
}
EXPORT_SYMBOL_GPL(tb_xdomain_response);

static int __tb_xdomain_request(struct tb_ctl *ctl, const void *request,
        size_t request_size, enum tb_cfg_pkg_type request_type, void *response,
        size_t response_size, enum tb_cfg_pkg_type response_type,
        unsigned int timeout_msec)
{
        struct tb_cfg_request *req;
        struct tb_cfg_result res;

        req = tb_cfg_request_alloc();
        if (!req)
                return -ENOMEM;

        req->match = tb_xdomain_match;
        req->copy = tb_xdomain_copy;
        req->request = request;
        req->request_size = request_size;
        req->request_type = request_type;
        req->response = response;
        req->response_size = response_size;
        req->response_type = response_type;

        res = tb_cfg_request_sync(ctl, req, timeout_msec);

        tb_cfg_request_put(req);

        return res.err == 1 ? -EIO : res.err;
}

/**
 * tb_xdomain_request() - Send an XDomain request
 * @xd: XDomain to send the request
 * @request: Request to send
 * @request_size: Size of the request in bytes
 * @request_type: PDF type of the request
 * @response: Response is copied here
 * @response_size: Expected size of the response in bytes
 * @response_type: Expected PDF type of the response
 * @timeout_msec: Timeout in milliseconds to wait for the response
 *
 * This function can be used to send XDomain control channel messages to
 * the other domain. The function waits until the response is received
 * or the timeout expires, whichever comes first.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
        size_t request_size, enum tb_cfg_pkg_type request_type,
        void *response, size_t response_size,
        enum tb_cfg_pkg_type response_type, unsigned int timeout_msec)
{
        return __tb_xdomain_request(xd->tb->ctl, request, request_size,
                                    request_type, response, response_size,
                                    response_type, timeout_msec);
}
EXPORT_SYMBOL_GPL(tb_xdomain_request);
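
/*
 * Example (sketch): how a service driver might exchange a custom
 * request/response pair over the control channel. The tb_my_req and
 * tb_my_resp structures are hypothetical and only illustrate that the
 * caller supplies raw buffers plus the expected PDF types; error
 * handling is kept minimal on purpose.
 *
 *      struct tb_my_req req = { .opcode = 1 };
 *      struct tb_my_resp resp;
 *      int ret;
 *
 *      ret = tb_xdomain_request(xd, &req, sizeof(req),
 *                               TB_CFG_PKG_XDOMAIN_REQ,
 *                               &resp, sizeof(resp),
 *                               TB_CFG_PKG_XDOMAIN_RESP,
 *                               XDOMAIN_DEFAULT_TIMEOUT);
 *      if (ret)
 *              return ret;
 */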

static inline void tb_xdp_fill_header(struct tb_xdp_header *hdr, u64 route,
        u8 sequence, enum tb_xdp_type type, size_t size)
{
        u32 length_sn;

        length_sn = (size - sizeof(hdr->xd_hdr)) / 4;
        length_sn |= (sequence << TB_XDOMAIN_SN_SHIFT) & TB_XDOMAIN_SN_MASK;

        hdr->xd_hdr.route_hi = upper_32_bits(route);
        hdr->xd_hdr.route_lo = lower_32_bits(route);
        hdr->xd_hdr.length_sn = length_sn;
        hdr->type = type;
        memcpy(&hdr->uuid, &tb_xdp_uuid, sizeof(tb_xdp_uuid));
}

static int tb_xdp_handle_error(const struct tb_xdp_header *hdr)
{
        const struct tb_xdp_error_response *error;

        if (hdr->type != ERROR_RESPONSE)
                return 0;

        error = (const struct tb_xdp_error_response *)hdr;

        switch (error->error) {
        case ERROR_UNKNOWN_PACKET:
        case ERROR_UNKNOWN_DOMAIN:
                return -EIO;
        case ERROR_NOT_SUPPORTED:
                return -ENOTSUPP;
        case ERROR_NOT_READY:
                return -EAGAIN;
        default:
                break;
        }

        return 0;
}

static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry,
                               uuid_t *uuid)
{
        struct tb_xdp_uuid_response res;
        struct tb_xdp_uuid req;
        int ret;

        memset(&req, 0, sizeof(req));
        tb_xdp_fill_header(&req.hdr, route, retry % 4, UUID_REQUEST,
                           sizeof(req));

        memset(&res, 0, sizeof(res));
        ret = __tb_xdomain_request(ctl, &req, sizeof(req),
                                   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
                                   TB_CFG_PKG_XDOMAIN_RESP,
                                   XDOMAIN_DEFAULT_TIMEOUT);
        if (ret)
                return ret;

        ret = tb_xdp_handle_error(&res.hdr);
        if (ret)
                return ret;

        uuid_copy(uuid, &res.src_uuid);
        return 0;
}

static int tb_xdp_uuid_response(struct tb_ctl *ctl, u64 route, u8 sequence,
                                const uuid_t *uuid)
{
        struct tb_xdp_uuid_response res;

        memset(&res, 0, sizeof(res));
        tb_xdp_fill_header(&res.hdr, route, sequence, UUID_RESPONSE,
                           sizeof(res));

        uuid_copy(&res.src_uuid, uuid);
        res.src_route_hi = upper_32_bits(route);
        res.src_route_lo = lower_32_bits(route);

        return __tb_xdomain_response(ctl, &res, sizeof(res),
                                     TB_CFG_PKG_XDOMAIN_RESP);
}

static int tb_xdp_error_response(struct tb_ctl *ctl, u64 route, u8 sequence,
                                 enum tb_xdp_error error)
{
        struct tb_xdp_error_response res;

        memset(&res, 0, sizeof(res));
        tb_xdp_fill_header(&res.hdr, route, sequence, ERROR_RESPONSE,
                           sizeof(res));
        res.error = error;

        return __tb_xdomain_response(ctl, &res, sizeof(res),
                                     TB_CFG_PKG_XDOMAIN_RESP);
}

static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route,
        const uuid_t *src_uuid, const uuid_t *dst_uuid, int retry,
        u32 **block, u32 *generation)
{
        struct tb_xdp_properties_response *res;
        struct tb_xdp_properties req;
        u16 data_len, len;
        size_t total_size;
        u32 *data = NULL;
        int ret;

        total_size = sizeof(*res) + TB_XDP_PROPERTIES_MAX_DATA_LENGTH * 4;
        res = kzalloc(total_size, GFP_KERNEL);
        if (!res)
                return -ENOMEM;

        memset(&req, 0, sizeof(req));
        tb_xdp_fill_header(&req.hdr, route, retry % 4, PROPERTIES_REQUEST,
                           sizeof(req));
        memcpy(&req.src_uuid, src_uuid, sizeof(*src_uuid));
        memcpy(&req.dst_uuid, dst_uuid, sizeof(*dst_uuid));

        len = 0;
        data_len = 0;

        do {
                ret = __tb_xdomain_request(ctl, &req, sizeof(req),
                                           TB_CFG_PKG_XDOMAIN_REQ, res,
                                           total_size, TB_CFG_PKG_XDOMAIN_RESP,
                                           XDOMAIN_DEFAULT_TIMEOUT);
                if (ret)
                        goto err;

                ret = tb_xdp_handle_error(&res->hdr);
                if (ret)
                        goto err;

                /*
                 * Packet length includes the whole payload without the
                 * XDomain header. Validate first that the packet is at
                 * least the size of the response structure.
                 */
                len = res->hdr.xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
                if (len < sizeof(*res) / 4) {
                        ret = -EINVAL;
                        goto err;
                }

                len += sizeof(res->hdr.xd_hdr) / 4;
                len -= sizeof(*res) / 4;

                if (res->offset != req.offset) {
                        ret = -EINVAL;
                        goto err;
                }

                /*
                 * The first time through, allocate a block that has
                 * enough space for the whole properties block.
                 */
                if (!data) {
                        data_len = res->data_length;
                        if (data_len > TB_XDP_PROPERTIES_MAX_LENGTH) {
                                ret = -E2BIG;
                                goto err;
                        }

                        data = kcalloc(data_len, sizeof(u32), GFP_KERNEL);
                        if (!data) {
                                ret = -ENOMEM;
                                goto err;
                        }
                }

                memcpy(data + req.offset, res->data, len * 4);
                req.offset += len;
        } while (!data_len || req.offset < data_len);

        *block = data;
        *generation = res->generation;

        kfree(res);

        return data_len;

err:
        kfree(data);
        kfree(res);

        return ret;
}

static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl,
        u64 route, u8 sequence, const uuid_t *src_uuid,
        const struct tb_xdp_properties *req)
{
        struct tb_xdp_properties_response *res;
        size_t total_size;
        u16 len;
        int ret;

        /*
         * Currently we expect all requests to be directed to us. The
         * protocol supports forwarding, though, and we might add
         * support for it later on.
         */
        if (!uuid_equal(src_uuid, &req->dst_uuid)) {
                tb_xdp_error_response(ctl, route, sequence,
                                      ERROR_UNKNOWN_DOMAIN);
                return 0;
        }

        mutex_lock(&xdomain_lock);

        if (req->offset >= xdomain_property_block_len) {
                mutex_unlock(&xdomain_lock);
                return -EINVAL;
        }

        len = xdomain_property_block_len - req->offset;
        len = min_t(u16, len, TB_XDP_PROPERTIES_MAX_DATA_LENGTH);
        total_size = sizeof(*res) + len * 4;

        res = kzalloc(total_size, GFP_KERNEL);
        if (!res) {
                mutex_unlock(&xdomain_lock);
                return -ENOMEM;
        }

        tb_xdp_fill_header(&res->hdr, route, sequence, PROPERTIES_RESPONSE,
                           total_size);
        res->generation = xdomain_property_block_gen;
        res->data_length = xdomain_property_block_len;
        res->offset = req->offset;
        uuid_copy(&res->src_uuid, src_uuid);
        uuid_copy(&res->dst_uuid, &req->src_uuid);
        memcpy(res->data, &xdomain_property_block[req->offset], len * 4);

        mutex_unlock(&xdomain_lock);

        ret = __tb_xdomain_response(ctl, res, total_size,
                                    TB_CFG_PKG_XDOMAIN_RESP);

        kfree(res);
        return ret;
}

static int tb_xdp_properties_changed_request(struct tb_ctl *ctl, u64 route,
                                             int retry, const uuid_t *uuid)
{
        struct tb_xdp_properties_changed_response res;
        struct tb_xdp_properties_changed req;
        int ret;

        memset(&req, 0, sizeof(req));
        tb_xdp_fill_header(&req.hdr, route, retry % 4,
                           PROPERTIES_CHANGED_REQUEST, sizeof(req));
        uuid_copy(&req.src_uuid, uuid);

        memset(&res, 0, sizeof(res));
        ret = __tb_xdomain_request(ctl, &req, sizeof(req),
                                   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
                                   TB_CFG_PKG_XDOMAIN_RESP,
                                   XDOMAIN_DEFAULT_TIMEOUT);
        if (ret)
                return ret;

        return tb_xdp_handle_error(&res.hdr);
}

static int
tb_xdp_properties_changed_response(struct tb_ctl *ctl, u64 route, u8 sequence)
{
        struct tb_xdp_properties_changed_response res;

        memset(&res, 0, sizeof(res));
        tb_xdp_fill_header(&res.hdr, route, sequence,
                           PROPERTIES_CHANGED_RESPONSE, sizeof(res));
        return __tb_xdomain_response(ctl, &res, sizeof(res),
                                     TB_CFG_PKG_XDOMAIN_RESP);
}

/**
 * tb_register_protocol_handler() - Register protocol handler
 * @handler: Handler to register
 *
 * This allows XDomain service drivers to hook into incoming XDomain
 * messages. After this function is called the service driver needs to
 * be able to handle calls to the callback whenever a packet with the
 * registered protocol is received.
 */
int tb_register_protocol_handler(struct tb_protocol_handler *handler)
{
        if (!handler->uuid || !handler->callback)
                return -EINVAL;
        if (uuid_equal(handler->uuid, &tb_xdp_uuid))
                return -EINVAL;

        mutex_lock(&xdomain_lock);
        list_add_tail(&handler->list, &protocol_handlers);
        mutex_unlock(&xdomain_lock);

        return 0;
}
EXPORT_SYMBOL_GPL(tb_register_protocol_handler);

/**
 * tb_unregister_protocol_handler() - Unregister protocol handler
 * @handler: Handler to unregister
 *
 * Removes the previously registered protocol handler.
 */
void tb_unregister_protocol_handler(struct tb_protocol_handler *handler)
{
        mutex_lock(&xdomain_lock);
        list_del_init(&handler->list);
        mutex_unlock(&xdomain_lock);
}
EXPORT_SYMBOL_GPL(tb_unregister_protocol_handler);
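
/*
 * Example (sketch): registering a handler for a custom protocol UUID.
 * The my_proto_uuid value and my_proto_callback() are hypothetical;
 * the callback runs for every incoming packet carrying that UUID and
 * returns a positive value once it has consumed the packet (see
 * tb_xdomain_handle_request() below).
 *
 *      static int my_proto_callback(const void *buf, size_t size,
 *                                   void *data)
 *      {
 *              // Inspect the packet, return > 0 when handled
 *              return 1;
 *      }
 *
 *      static struct tb_protocol_handler my_handler = {
 *              .uuid = &my_proto_uuid,
 *              .callback = my_proto_callback,
 *      };
 *
 *      ret = tb_register_protocol_handler(&my_handler);
 *      ...
 *      tb_unregister_protocol_handler(&my_handler);
 */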

static void tb_xdp_handle_request(struct work_struct *work)
{
        struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
        const struct tb_xdp_header *pkg = xw->pkg;
        const struct tb_xdomain_header *xhdr = &pkg->xd_hdr;
        struct tb *tb = xw->tb;
        struct tb_ctl *ctl = tb->ctl;
        const uuid_t *uuid;
        int ret = 0;
        u32 sequence;
        u64 route;

        route = ((u64)xhdr->route_hi << 32 | xhdr->route_lo) & ~BIT_ULL(63);
        sequence = xhdr->length_sn & TB_XDOMAIN_SN_MASK;
        sequence >>= TB_XDOMAIN_SN_SHIFT;

        mutex_lock(&tb->lock);
        if (tb->root_switch)
                uuid = tb->root_switch->uuid;
        else
                uuid = NULL;
        mutex_unlock(&tb->lock);

        if (!uuid) {
                tb_xdp_error_response(ctl, route, sequence, ERROR_NOT_READY);
                goto out;
        }

        switch (pkg->type) {
        case PROPERTIES_REQUEST:
                ret = tb_xdp_properties_response(tb, ctl, route, sequence, uuid,
                        (const struct tb_xdp_properties *)pkg);
                break;

        case PROPERTIES_CHANGED_REQUEST: {
                const struct tb_xdp_properties_changed *xchg =
                        (const struct tb_xdp_properties_changed *)pkg;
                struct tb_xdomain *xd;

                ret = tb_xdp_properties_changed_response(ctl, route, sequence);

                /*
                 * Since the properties have been changed, let's update
                 * the xdomain related to this connection as well in
                 * case there is a change in services it offers.
                 */
                xd = tb_xdomain_find_by_uuid_locked(tb, &xchg->src_uuid);
                if (xd) {
                        queue_delayed_work(tb->wq, &xd->get_properties_work,
                                           msecs_to_jiffies(50));
                        tb_xdomain_put(xd);
                }

                break;
        }

        case UUID_REQUEST_OLD:
        case UUID_REQUEST:
                ret = tb_xdp_uuid_response(ctl, route, sequence, uuid);
                break;

        default:
                tb_xdp_error_response(ctl, route, sequence,
                                      ERROR_NOT_SUPPORTED);
                break;
        }

        if (ret) {
                tb_warn(tb, "failed to send XDomain response for %#x\n",
                        pkg->type);
        }

out:
        kfree(xw->pkg);
        kfree(xw);

        tb_domain_put(tb);
}

static bool
tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
                        size_t size)
{
        struct xdomain_request_work *xw;

        xw = kmalloc(sizeof(*xw), GFP_KERNEL);
        if (!xw)
                return false;

        INIT_WORK(&xw->work, tb_xdp_handle_request);
        xw->pkg = kmemdup(hdr, size, GFP_KERNEL);
        if (!xw->pkg) {
                kfree(xw);
                return false;
        }
        xw->tb = tb_domain_get(tb);

        schedule_work(&xw->work);
        return true;
}

/**
 * tb_register_service_driver() - Register XDomain service driver
 * @drv: Driver to register
 *
 * Registers the new service driver @drv with the bus.
 */
int tb_register_service_driver(struct tb_service_driver *drv)
{
        drv->driver.bus = &tb_bus_type;
        return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_register_service_driver);

/**
 * tb_unregister_service_driver() - Unregister XDomain service driver
 * @drv: Driver to unregister
 *
 * Unregisters the XDomain service driver from the bus.
 */
void tb_unregister_service_driver(struct tb_service_driver *drv)
{
        driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_unregister_service_driver);
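
/*
 * Example (sketch): a minimal XDomain service driver. The names
 * my_service_probe(), my_service_remove() and the "network" protocol
 * key in the ID table are hypothetical placeholders; a real driver
 * matches on the protocol key and IDs its service directory
 * advertises.
 *
 *      static const struct tb_service_id my_service_ids[] = {
 *              { TB_SERVICE("network", 1) },
 *              { },
 *      };
 *      MODULE_DEVICE_TABLE(tbsvc, my_service_ids);
 *
 *      static struct tb_service_driver my_service_driver = {
 *              .driver.name = "my-service",
 *              .probe = my_service_probe,
 *              .remove = my_service_remove,
 *              .id_table = my_service_ids,
 *      };
 *
 *      module_driver(my_service_driver, tb_register_service_driver,
 *                    tb_unregister_service_driver);
 */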

static ssize_t key_show(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        struct tb_service *svc = container_of(dev, struct tb_service, dev);

        /*
         * It should be null terminated but anything else is pretty much
         * allowed.
         */
        return sprintf(buf, "%*pE\n", (int)strlen(svc->key), svc->key);
}
static DEVICE_ATTR_RO(key);

static int get_modalias(struct tb_service *svc, char *buf, size_t size)
{
        return snprintf(buf, size, "tbsvc:k%sp%08Xv%08Xr%08X", svc->key,
                        svc->prtcid, svc->prtcvers, svc->prtcrevs);
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct tb_service *svc = container_of(dev, struct tb_service, dev);

        /* Full buffer size except new line and null termination */
        get_modalias(svc, buf, PAGE_SIZE - 2);
        return sprintf(buf, "%s\n", buf);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t prtcid_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct tb_service *svc = container_of(dev, struct tb_service, dev);

        return sprintf(buf, "%u\n", svc->prtcid);
}
static DEVICE_ATTR_RO(prtcid);

static ssize_t prtcvers_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct tb_service *svc = container_of(dev, struct tb_service, dev);

        return sprintf(buf, "%u\n", svc->prtcvers);
}
static DEVICE_ATTR_RO(prtcvers);

static ssize_t prtcrevs_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct tb_service *svc = container_of(dev, struct tb_service, dev);

        return sprintf(buf, "%u\n", svc->prtcrevs);
}
static DEVICE_ATTR_RO(prtcrevs);

static ssize_t prtcstns_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct tb_service *svc = container_of(dev, struct tb_service, dev);

        return sprintf(buf, "0x%08x\n", svc->prtcstns);
}
static DEVICE_ATTR_RO(prtcstns);

static struct attribute *tb_service_attrs[] = {
        &dev_attr_key.attr,
        &dev_attr_modalias.attr,
        &dev_attr_prtcid.attr,
        &dev_attr_prtcvers.attr,
        &dev_attr_prtcrevs.attr,
        &dev_attr_prtcstns.attr,
        NULL,
};

static struct attribute_group tb_service_attr_group = {
        .attrs = tb_service_attrs,
};

static const struct attribute_group *tb_service_attr_groups[] = {
        &tb_service_attr_group,
        NULL,
};

static int tb_service_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        struct tb_service *svc = container_of(dev, struct tb_service, dev);
        char modalias[64];

        get_modalias(svc, modalias, sizeof(modalias));
        return add_uevent_var(env, "MODALIAS=%s", modalias);
}

static void tb_service_release(struct device *dev)
{
        struct tb_service *svc = container_of(dev, struct tb_service, dev);
        struct tb_xdomain *xd = tb_service_parent(svc);

        ida_simple_remove(&xd->service_ids, svc->id);
        kfree(svc->key);
        kfree(svc);
}

struct device_type tb_service_type = {
        .name = "thunderbolt_service",
        .groups = tb_service_attr_groups,
        .uevent = tb_service_uevent,
        .release = tb_service_release,
};
EXPORT_SYMBOL_GPL(tb_service_type);

static int remove_missing_service(struct device *dev, void *data)
{
        struct tb_xdomain *xd = data;
        struct tb_service *svc;

        svc = tb_to_service(dev);
        if (!svc)
                return 0;

        if (!tb_property_find(xd->properties, svc->key,
                              TB_PROPERTY_TYPE_DIRECTORY))
                device_unregister(dev);

        return 0;
}

static int find_service(struct device *dev, void *data)
{
        const struct tb_property *p = data;
        struct tb_service *svc;

        svc = tb_to_service(dev);
        if (!svc)
                return 0;

        return !strcmp(svc->key, p->key);
}

static int populate_service(struct tb_service *svc,
                            struct tb_property *property)
{
        struct tb_property_dir *dir = property->value.dir;
        struct tb_property *p;

        /* Fill in standard properties */
        p = tb_property_find(dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
        if (p)
                svc->prtcid = p->value.immediate;
        p = tb_property_find(dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
        if (p)
                svc->prtcvers = p->value.immediate;
        p = tb_property_find(dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
        if (p)
                svc->prtcrevs = p->value.immediate;
        p = tb_property_find(dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
        if (p)
                svc->prtcstns = p->value.immediate;

        svc->key = kstrdup(property->key, GFP_KERNEL);
        if (!svc->key)
                return -ENOMEM;

        return 0;
}

static void enumerate_services(struct tb_xdomain *xd)
{
        struct tb_service *svc;
        struct tb_property *p;
        struct device *dev;
        int id;

        /*
         * First remove all services that are not available anymore in
         * the updated property block.
         */
        device_for_each_child_reverse(&xd->dev, xd, remove_missing_service);

        /* Then re-enumerate properties creating new services as we go */
        tb_property_for_each(xd->properties, p) {
                if (p->type != TB_PROPERTY_TYPE_DIRECTORY)
                        continue;

                /* If the service exists already we are fine */
                dev = device_find_child(&xd->dev, p, find_service);
                if (dev) {
                        put_device(dev);
                        continue;
                }

                svc = kzalloc(sizeof(*svc), GFP_KERNEL);
                if (!svc)
                        break;

                if (populate_service(svc, p)) {
                        kfree(svc);
                        break;
                }

                id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
                if (id < 0) {
                        kfree(svc);
                        break;
                }
                svc->id = id;
                svc->dev.bus = &tb_bus_type;
                svc->dev.type = &tb_service_type;
                svc->dev.parent = &xd->dev;
                dev_set_name(&svc->dev, "%s.%d", dev_name(&xd->dev), svc->id);

                if (device_register(&svc->dev)) {
                        put_device(&svc->dev);
                        break;
                }
        }
}

static int populate_properties(struct tb_xdomain *xd,
                               struct tb_property_dir *dir)
{
        const struct tb_property *p;

        /* Required properties */
        p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
        if (!p)
                return -EINVAL;
        xd->device = p->value.immediate;

        p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
        if (!p)
                return -EINVAL;
        xd->vendor = p->value.immediate;

        kfree(xd->device_name);
        xd->device_name = NULL;
        kfree(xd->vendor_name);
        xd->vendor_name = NULL;

        /* Optional properties */
        p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
        if (p)
                xd->device_name = kstrdup(p->value.text, GFP_KERNEL);
        p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
        if (p)
                xd->vendor_name = kstrdup(p->value.text, GFP_KERNEL);

        return 0;
}

/* Called with @xd->lock held */
static void tb_xdomain_restore_paths(struct tb_xdomain *xd)
{
        if (!xd->resume)
                return;

        xd->resume = false;
        if (xd->transmit_path) {
                dev_dbg(&xd->dev, "re-establishing DMA path\n");
                tb_domain_approve_xdomain_paths(xd->tb, xd);
        }
}

static void tb_xdomain_get_uuid(struct work_struct *work)
{
        struct tb_xdomain *xd = container_of(work, typeof(*xd),
                                             get_uuid_work.work);
        struct tb *tb = xd->tb;
        uuid_t uuid;
        int ret;

        ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->uuid_retries, &uuid);
        if (ret < 0) {
                if (xd->uuid_retries-- > 0) {
                        queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
                                           msecs_to_jiffies(100));
                } else {
                        dev_dbg(&xd->dev, "failed to read remote UUID\n");
                }
                return;
        }

        if (uuid_equal(&uuid, xd->local_uuid)) {
                dev_dbg(&xd->dev, "intra-domain loop detected\n");
                return;
        }

        /*
         * If the UUID is different, there is another domain connected
         * so mark this one unplugged and wait for the connection
         * manager to replace it.
         */
        if (xd->remote_uuid && !uuid_equal(&uuid, xd->remote_uuid)) {
                dev_dbg(&xd->dev, "remote UUID is different, unplugging\n");
                xd->is_unplugged = true;
                return;
        }

        /* First time fill in the missing UUID */
        if (!xd->remote_uuid) {
                xd->remote_uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
                if (!xd->remote_uuid)
                        return;
        }

        /* Now we can start the normal properties exchange */
        queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
                           msecs_to_jiffies(100));
        queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
                           msecs_to_jiffies(1000));
}

static void tb_xdomain_get_properties(struct work_struct *work)
{
        struct tb_xdomain *xd = container_of(work, typeof(*xd),
                                             get_properties_work.work);
        struct tb_property_dir *dir;
        struct tb *tb = xd->tb;
        bool update = false;
        u32 *block = NULL;
        u32 gen = 0;
        int ret;

        ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid,
                                        xd->remote_uuid, xd->properties_retries,
                                        &block, &gen);
        if (ret < 0) {
                if (xd->properties_retries-- > 0) {
                        queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
                                           msecs_to_jiffies(1000));
                } else {
                        /* Give up now */
                        dev_err(&xd->dev,
                                "failed to read XDomain properties from %pUb\n",
                                xd->remote_uuid);
                }
                return;
        }

        xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;

        mutex_lock(&xd->lock);

        /* Only accept newer generation properties */
        if (xd->properties && gen <= xd->property_block_gen) {
                /*
                 * On resume it is likely that the properties block is
                 * not changed (unless the other end added or removed
                 * services). However, we need to make sure the existing
                 * DMA paths are restored properly.
                 */
                tb_xdomain_restore_paths(xd);
                goto err_free_block;
        }

        dir = tb_property_parse_dir(block, ret);
        if (!dir) {
                dev_err(&xd->dev, "failed to parse XDomain properties\n");
                goto err_free_block;
        }

        ret = populate_properties(xd, dir);
        if (ret) {
                dev_err(&xd->dev, "missing XDomain properties in response\n");
                goto err_free_dir;
        }

        /* Release the existing one */
        if (xd->properties) {
                tb_property_free_dir(xd->properties);
                update = true;
        }

        xd->properties = dir;
        xd->property_block_gen = gen;

        tb_xdomain_restore_paths(xd);

        mutex_unlock(&xd->lock);

        kfree(block);

        /*
         * Now the device should be ready enough so we can add it to the
         * bus and let userspace know about it. If the device is already
         * registered, we notify userspace that it has changed.
         */
        if (!update) {
                if (device_add(&xd->dev)) {
                        dev_err(&xd->dev, "failed to add XDomain device\n");
                        return;
                }
        } else {
                kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
        }

        enumerate_services(xd);
        return;

err_free_dir:
        tb_property_free_dir(dir);
err_free_block:
        kfree(block);
        mutex_unlock(&xd->lock);
}

static void tb_xdomain_properties_changed(struct work_struct *work)
{
        struct tb_xdomain *xd = container_of(work, typeof(*xd),
                                             properties_changed_work.work);
        int ret;

        ret = tb_xdp_properties_changed_request(xd->tb->ctl, xd->route,
                                xd->properties_changed_retries, xd->local_uuid);
        if (ret) {
                if (xd->properties_changed_retries-- > 0)
                        queue_delayed_work(xd->tb->wq,
                                           &xd->properties_changed_work,
                                           msecs_to_jiffies(1000));
                return;
        }

        xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
}

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

        return sprintf(buf, "%#x\n", xd->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
        int ret;

        if (mutex_lock_interruptible(&xd->lock))
                return -ERESTARTSYS;
        ret = sprintf(buf, "%s\n", xd->device_name ? xd->device_name : "");
        mutex_unlock(&xd->lock);

        return ret;
}
static DEVICE_ATTR_RO(device_name);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

        return sprintf(buf, "%#x\n", xd->vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
        int ret;

        if (mutex_lock_interruptible(&xd->lock))
                return -ERESTARTSYS;
        ret = sprintf(buf, "%s\n", xd->vendor_name ? xd->vendor_name : "");
        mutex_unlock(&xd->lock);

        return ret;
}
static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
                              char *buf)
{
        struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

        return sprintf(buf, "%pUb\n", xd->remote_uuid);
}
static DEVICE_ATTR_RO(unique_id);

static struct attribute *xdomain_attrs[] = {
        &dev_attr_device.attr,
        &dev_attr_device_name.attr,
        &dev_attr_unique_id.attr,
        &dev_attr_vendor.attr,
        &dev_attr_vendor_name.attr,
        NULL,
};

static struct attribute_group xdomain_attr_group = {
        .attrs = xdomain_attrs,
};

static const struct attribute_group *xdomain_attr_groups[] = {
        &xdomain_attr_group,
        NULL,
};

static void tb_xdomain_release(struct device *dev)
{
        struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

        put_device(xd->dev.parent);

        tb_property_free_dir(xd->properties);
        ida_destroy(&xd->service_ids);

        kfree(xd->local_uuid);
        kfree(xd->remote_uuid);
        kfree(xd->device_name);
        kfree(xd->vendor_name);
        kfree(xd);
}

static void start_handshake(struct tb_xdomain *xd)
{
        xd->uuid_retries = XDOMAIN_UUID_RETRIES;
        xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
        xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;

        if (xd->needs_uuid) {
                queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
                                   msecs_to_jiffies(100));
        } else {
                /* Start exchanging properties with the other host */
                queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
                                   msecs_to_jiffies(100));
                queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
                                   msecs_to_jiffies(1000));
        }
}

static void stop_handshake(struct tb_xdomain *xd)
{
        xd->uuid_retries = 0;
        xd->properties_retries = 0;
        xd->properties_changed_retries = 0;

        cancel_delayed_work_sync(&xd->get_uuid_work);
        cancel_delayed_work_sync(&xd->get_properties_work);
        cancel_delayed_work_sync(&xd->properties_changed_work);
}

static int __maybe_unused tb_xdomain_suspend(struct device *dev)
{
        stop_handshake(tb_to_xdomain(dev));
        return 0;
}

static int __maybe_unused tb_xdomain_resume(struct device *dev)
{
        struct tb_xdomain *xd = tb_to_xdomain(dev);

        /*
         * Ask tb_xdomain_get_properties() to restore any existing DMA
         * paths after the properties are re-read.
         */
        xd->resume = true;
        start_handshake(xd);

        return 0;
}

static const struct dev_pm_ops tb_xdomain_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(tb_xdomain_suspend, tb_xdomain_resume)
};

struct device_type tb_xdomain_type = {
        .name = "thunderbolt_xdomain",
        .release = tb_xdomain_release,
        .pm = &tb_xdomain_pm_ops,
};
EXPORT_SYMBOL_GPL(tb_xdomain_type);

/**
 * tb_xdomain_alloc() - Allocate new XDomain object
 * @tb: Domain where the XDomain belongs
 * @parent: Parent device (the switch through which the connection to
 *          the other domain is reached)
 * @route: Route string used to reach the other domain
 * @local_uuid: Our local domain UUID
 * @remote_uuid: UUID of the other domain (optional)
 *
 * Allocates a new XDomain structure and returns a pointer to it. The
 * object must be released by calling tb_xdomain_put().
 */
struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
                                    u64 route, const uuid_t *local_uuid,
                                    const uuid_t *remote_uuid)
{
        struct tb_xdomain *xd;

        xd = kzalloc(sizeof(*xd), GFP_KERNEL);
        if (!xd)
                return NULL;

        xd->tb = tb;
        xd->route = route;
        ida_init(&xd->service_ids);
        mutex_init(&xd->lock);
        INIT_DELAYED_WORK(&xd->get_uuid_work, tb_xdomain_get_uuid);
        INIT_DELAYED_WORK(&xd->get_properties_work, tb_xdomain_get_properties);
        INIT_DELAYED_WORK(&xd->properties_changed_work,
                          tb_xdomain_properties_changed);

        xd->local_uuid = kmemdup(local_uuid, sizeof(uuid_t), GFP_KERNEL);
        if (!xd->local_uuid)
                goto err_free;

        if (remote_uuid) {
                xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t),
                                          GFP_KERNEL);
                if (!xd->remote_uuid)
                        goto err_free_local_uuid;
        } else {
                xd->needs_uuid = true;
        }

        device_initialize(&xd->dev);
        xd->dev.parent = get_device(parent);
        xd->dev.bus = &tb_bus_type;
        xd->dev.type = &tb_xdomain_type;
        xd->dev.groups = xdomain_attr_groups;
        dev_set_name(&xd->dev, "%u-%llx", tb->index, route);

        /*
         * This keeps the DMA powered on as long as we have an active
         * connection to another host.
         */
        pm_runtime_set_active(&xd->dev);
        pm_runtime_get_noresume(&xd->dev);
        pm_runtime_enable(&xd->dev);

        return xd;

err_free_local_uuid:
        kfree(xd->local_uuid);
err_free:
        kfree(xd);

        return NULL;
}

/**
 * tb_xdomain_add() - Add XDomain to the bus
 * @xd: XDomain to add
 *
 * This function starts the XDomain discovery protocol handshake and
 * eventually adds the XDomain to the bus. After calling this function
 * the caller needs to call tb_xdomain_remove() in order to remove and
 * release the object, regardless of whether the handshake succeeded.
 */
void tb_xdomain_add(struct tb_xdomain *xd)
{
        /* Start exchanging properties with the other host */
        start_handshake(xd);
}

static int unregister_service(struct device *dev, void *data)
{
        device_unregister(dev);
        return 0;
}

/**
 * tb_xdomain_remove() - Remove XDomain from the bus
 * @xd: XDomain to remove
 *
 * This will stop all ongoing configuration work and remove the XDomain
 * along with any services from the bus. When the last reference to @xd
 * is released the object will be released as well.
 */
void tb_xdomain_remove(struct tb_xdomain *xd)
{
        stop_handshake(xd);

        device_for_each_child_reverse(&xd->dev, xd, unregister_service);

        /*
         * Undo runtime PM here explicitly because it is possible that
         * the XDomain was never added to the bus and thus device_del()
         * is not called for it (device_del() would handle this otherwise).
         */
        pm_runtime_disable(&xd->dev);
        pm_runtime_put_noidle(&xd->dev);
        pm_runtime_set_suspended(&xd->dev);

        if (!device_is_registered(&xd->dev))
                put_device(&xd->dev);
        else
                device_unregister(&xd->dev);
}
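
/*
 * Typical life cycle (sketch), as driven by a connection manager. The
 * tb and sw names below stand for the domain and the switch at the end
 * of the link and are only illustrative.
 *
 *      xd = tb_xdomain_alloc(tb, &sw->dev, route, local_uuid, NULL);
 *      if (xd)
 *              tb_xdomain_add(xd);
 *      ...
 *      // When the link goes away (or on teardown):
 *      tb_xdomain_remove(xd);
 */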

/**
 * tb_xdomain_enable_paths() - Enable DMA paths for XDomain connection
 * @xd: XDomain connection
 * @transmit_path: HopID of the transmit path the other end is using to
 *                 send packets
 * @transmit_ring: DMA ring used to receive packets from the other end
 * @receive_path: HopID of the receive path the other end is using to
 *                receive packets
 * @receive_ring: DMA ring used to send packets to the other end
 *
 * The function enables the DMA paths so that after a successful return
 * the caller can send and receive packets using the high-speed DMA
 * path.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path,
                            u16 transmit_ring, u16 receive_path,
                            u16 receive_ring)
{
        int ret;

        mutex_lock(&xd->lock);

        if (xd->transmit_path) {
                ret = xd->transmit_path == transmit_path ? 0 : -EBUSY;
                goto exit_unlock;
        }

        xd->transmit_path = transmit_path;
        xd->transmit_ring = transmit_ring;
        xd->receive_path = receive_path;
        xd->receive_ring = receive_ring;

        ret = tb_domain_approve_xdomain_paths(xd->tb, xd);

exit_unlock:
        mutex_unlock(&xd->lock);

        return ret;
}
EXPORT_SYMBOL_GPL(tb_xdomain_enable_paths);

/**
 * tb_xdomain_disable_paths() - Disable DMA paths for XDomain connection
 * @xd: XDomain connection
 *
 * This does the opposite of tb_xdomain_enable_paths(). After a call to
 * this function the caller is not expected to use the rings anymore.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_xdomain_disable_paths(struct tb_xdomain *xd)
{
        int ret = 0;

        mutex_lock(&xd->lock);
        if (xd->transmit_path) {
                xd->transmit_path = 0;
                xd->transmit_ring = 0;
                xd->receive_path = 0;
                xd->receive_ring = 0;

                ret = tb_domain_disconnect_xdomain_paths(xd->tb, xd);
        }
        mutex_unlock(&xd->lock);

        return ret;
}
EXPORT_SYMBOL_GPL(tb_xdomain_disable_paths);
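
/*
 * Example (sketch): a service driver enabling the DMA paths once both
 * ends have agreed on the HopIDs and rings. The transmit/receive values
 * below are placeholders negotiated by the service protocol, not
 * constants defined by this file.
 *
 *      ret = tb_xdomain_enable_paths(xd, transmit_path, transmit_ring,
 *                                    receive_path, receive_ring);
 *      if (ret)
 *              return ret;
 *      ...
 *      // On teardown, release the paths again:
 *      tb_xdomain_disable_paths(xd);
 */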

struct tb_xdomain_lookup {
        const uuid_t *uuid;
        u8 link;
        u8 depth;
        u64 route;
};

static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
        const struct tb_xdomain_lookup *lookup)
{
        int i;

        for (i = 1; i <= sw->config.max_port_number; i++) {
                struct tb_port *port = &sw->ports[i];
                struct tb_xdomain *xd;

                if (port->xdomain) {
                        xd = port->xdomain;

                        if (lookup->uuid) {
                                if (xd->remote_uuid &&
                                    uuid_equal(xd->remote_uuid, lookup->uuid))
                                        return xd;
                        } else if (lookup->link &&
                                   lookup->link == xd->link &&
                                   lookup->depth == xd->depth) {
                                return xd;
                        } else if (lookup->route &&
                                   lookup->route == xd->route) {
                                return xd;
                        }
                } else if (tb_port_has_remote(port)) {
                        xd = switch_find_xdomain(port->remote->sw, lookup);
                        if (xd)
                                return xd;
                }
        }

        return NULL;
}

/**
 * tb_xdomain_find_by_uuid() - Find an XDomain by UUID
 * @tb: Domain where the XDomain belongs
 * @uuid: UUID to look for
 *
 * Finds an XDomain by walking through the Thunderbolt topology below
 * @tb. The returned XDomain will have its reference count increased so
 * the caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
        struct tb_xdomain_lookup lookup;
        struct tb_xdomain *xd;

        memset(&lookup, 0, sizeof(lookup));
        lookup.uuid = uuid;

        xd = switch_find_xdomain(tb->root_switch, &lookup);
        return tb_xdomain_get(xd);
}
EXPORT_SYMBOL_GPL(tb_xdomain_find_by_uuid);
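
/*
 * Example (sketch): looking up an XDomain while holding the domain
 * lock, then dropping the reference the lookup took. The remote_uuid
 * variable is a placeholder for whatever UUID the caller is after.
 *
 *      mutex_lock(&tb->lock);
 *      xd = tb_xdomain_find_by_uuid(tb, remote_uuid);
 *      mutex_unlock(&tb->lock);
 *      if (xd) {
 *              // ... use xd ...
 *              tb_xdomain_put(xd);
 *      }
 */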

/**
 * tb_xdomain_find_by_link_depth() - Find an XDomain by link and depth
 * @tb: Domain where the XDomain belongs
 * @link: Root switch link number
 * @depth: Depth in the link
 *
 * Finds an XDomain by walking through the Thunderbolt topology below
 * @tb. The returned XDomain will have its reference count increased so
 * the caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
                                                 u8 depth)
{
        struct tb_xdomain_lookup lookup;
        struct tb_xdomain *xd;

        memset(&lookup, 0, sizeof(lookup));
        lookup.link = link;
        lookup.depth = depth;

        xd = switch_find_xdomain(tb->root_switch, &lookup);
        return tb_xdomain_get(xd);
}

/**
 * tb_xdomain_find_by_route() - Find an XDomain by route string
 * @tb: Domain where the XDomain belongs
 * @route: XDomain route string
 *
 * Finds an XDomain by walking through the Thunderbolt topology below
 * @tb. The returned XDomain will have its reference count increased so
 * the caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route)
{
        struct tb_xdomain_lookup lookup;
        struct tb_xdomain *xd;

        memset(&lookup, 0, sizeof(lookup));
        lookup.route = route;

        xd = switch_find_xdomain(tb->root_switch, &lookup);
        return tb_xdomain_get(xd);
}
EXPORT_SYMBOL_GPL(tb_xdomain_find_by_route);

bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
                               const void *buf, size_t size)
{
        const struct tb_protocol_handler *handler, *tmp;
        const struct tb_xdp_header *hdr = buf;
        unsigned int length;
        int ret = 0;

        /* We expect the packet to be at least the size of the header */
        length = hdr->xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
        if (length != size / 4 - sizeof(hdr->xd_hdr) / 4)
                return true;
        if (length < sizeof(*hdr) / 4 - sizeof(hdr->xd_hdr) / 4)
                return true;

        /*
         * Handle XDomain discovery protocol packets directly here. For
         * other protocols (based on their UUID) we call registered
         * handlers in turn.
         */
        if (uuid_equal(&hdr->uuid, &tb_xdp_uuid)) {
                if (type == TB_CFG_PKG_XDOMAIN_REQ)
                        return tb_xdp_schedule_request(tb, hdr, size);
                return false;
        }

        mutex_lock(&xdomain_lock);
        list_for_each_entry_safe(handler, tmp, &protocol_handlers, list) {
                if (!uuid_equal(&hdr->uuid, handler->uuid))
                        continue;

                mutex_unlock(&xdomain_lock);
                ret = handler->callback(buf, size, handler->data);
                mutex_lock(&xdomain_lock);

                if (ret)
                        break;
        }
        mutex_unlock(&xdomain_lock);

        return ret > 0;
}

static int rebuild_property_block(void)
{
        u32 *block, len;
        int ret;

        ret = tb_property_format_dir(xdomain_property_dir, NULL, 0);
        if (ret < 0)
                return ret;

        len = ret;

        block = kcalloc(len, sizeof(u32), GFP_KERNEL);
        if (!block)
                return -ENOMEM;

        ret = tb_property_format_dir(xdomain_property_dir, block, len);
        if (ret) {
                kfree(block);
                return ret;
        }

        kfree(xdomain_property_block);
        xdomain_property_block = block;
        xdomain_property_block_len = len;
        xdomain_property_block_gen++;

        return 0;
}

static int update_xdomain(struct device *dev, void *data)
{
        struct tb_xdomain *xd;

        xd = tb_to_xdomain(dev);
        if (xd) {
                queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
                                   msecs_to_jiffies(50));
        }

        return 0;
}

static void update_all_xdomains(void)
{
        bus_for_each_dev(&tb_bus_type, NULL, NULL, update_xdomain);
}

static bool remove_directory(const char *key, const struct tb_property_dir *dir)
{
        struct tb_property *p;

        p = tb_property_find(xdomain_property_dir, key,
                             TB_PROPERTY_TYPE_DIRECTORY);
        if (p && p->value.dir == dir) {
                tb_property_remove(p);
                return true;
        }
        return false;
}

/**
 * tb_register_property_dir() - Register property directory to the host
 * @key: Key (name) of the directory to add
 * @dir: Directory to add
 *
 * Service drivers can use this function to add a new property directory
 * to the properties the host exposes. The other connected hosts are
 * notified so they can re-read the properties of this host if they are
 * interested.
 *
 * Return: %0 on success and negative errno on failure
 */
int tb_register_property_dir(const char *key, struct tb_property_dir *dir)
{
        int ret;

        if (WARN_ON(!xdomain_property_dir))
                return -EAGAIN;

        if (!key || strlen(key) > 8)
                return -EINVAL;

        mutex_lock(&xdomain_lock);
        if (tb_property_find(xdomain_property_dir, key,
                             TB_PROPERTY_TYPE_DIRECTORY)) {
                ret = -EEXIST;
                goto err_unlock;
        }

        ret = tb_property_add_dir(xdomain_property_dir, key, dir);
        if (ret)
                goto err_unlock;

        ret = rebuild_property_block();
        if (ret) {
                remove_directory(key, dir);
                goto err_unlock;
        }

        mutex_unlock(&xdomain_lock);
        update_all_xdomains();
        return 0;

err_unlock:
        mutex_unlock(&xdomain_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(tb_register_property_dir);
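
/*
 * Example (sketch): a service driver publishing its own directory so
 * that remote hosts can discover the service. The "network" key, the
 * my_service_uuid value and the property names mirror what a
 * networking service might expose; the exact contents are up to the
 * service protocol.
 *
 *      dir = tb_property_create_dir(&my_service_uuid);
 *      if (!dir)
 *              return -ENOMEM;
 *      tb_property_add_immediate(dir, "prtcid", 1);
 *      tb_property_add_immediate(dir, "prtcvers", 1);
 *
 *      ret = tb_register_property_dir("network", dir);
 *      ...
 *      tb_unregister_property_dir("network", dir);
 *      tb_property_free_dir(dir);
 */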

/**
 * tb_unregister_property_dir() - Removes property directory from host
 * @key: Key (name) of the directory
 * @dir: Directory to remove
 *
 * This will remove the existing directory from this host and notify the
 * connected hosts about the change.
 */
void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir)
{
        int ret = 0;

        mutex_lock(&xdomain_lock);
        if (remove_directory(key, dir))
                ret = rebuild_property_block();
        mutex_unlock(&xdomain_lock);

        if (!ret)
                update_all_xdomains();
}
EXPORT_SYMBOL_GPL(tb_unregister_property_dir);

int tb_xdomain_init(void)
{
        int ret;

        xdomain_property_dir = tb_property_create_dir(NULL);
        if (!xdomain_property_dir)
                return -ENOMEM;

        /*
         * Initialize standard set of properties without any service
         * directories. Those will be added by service drivers
         * themselves when they are loaded.
         */
        tb_property_add_immediate(xdomain_property_dir, "vendorid",
                                  PCI_VENDOR_ID_INTEL);
        tb_property_add_text(xdomain_property_dir, "vendorid", "Intel Corp.");
        tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
        tb_property_add_text(xdomain_property_dir, "deviceid",
                             utsname()->nodename);
        tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);

        ret = rebuild_property_block();
        if (ret) {
                tb_property_free_dir(xdomain_property_dir);
                xdomain_property_dir = NULL;
        }

        return ret;
}

void tb_xdomain_exit(void)
{
        kfree(xdomain_property_block);
        tb_property_free_dir(xdomain_property_dir);
}