linux/drivers/thunderbolt/xdomain.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt XDomain discovery protocol support
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/device.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/utsname.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

#include "tb.h"

#define XDOMAIN_DEFAULT_TIMEOUT                 5000 /* ms */
#define XDOMAIN_PROPERTIES_RETRIES              60
#define XDOMAIN_PROPERTIES_CHANGED_RETRIES      10

struct xdomain_request_work {
        struct work_struct work;
        struct tb_xdp_header *pkg;
        struct tb *tb;
};

/* Serializes access to the properties and protocol handlers below */
static DEFINE_MUTEX(xdomain_lock);

/* Properties exposed to the remote domains */
static struct tb_property_dir *xdomain_property_dir;
static u32 *xdomain_property_block;
static u32 xdomain_property_block_len;
static u32 xdomain_property_block_gen;

/* Additional protocol handlers */
static LIST_HEAD(protocol_handlers);

/* UUID for XDomain discovery protocol: b638d70e-42ff-40bb-97c2-90e2c0b2ff07 */
static const uuid_t tb_xdp_uuid =
        UUID_INIT(0xb638d70e, 0x42ff, 0x40bb,
                  0x97, 0xc2, 0x90, 0xe2, 0xc0, 0xb2, 0xff, 0x07);

static bool tb_xdomain_match(const struct tb_cfg_request *req,
                             const struct ctl_pkg *pkg)
{
        switch (pkg->frame.eof) {
        case TB_CFG_PKG_ERROR:
                return true;

        case TB_CFG_PKG_XDOMAIN_RESP: {
                const struct tb_xdp_header *res_hdr = pkg->buffer;
                const struct tb_xdp_header *req_hdr = req->request;

                if (pkg->frame.size < req->response_size / 4)
                        return false;

                /* Make sure route matches */
                if ((res_hdr->xd_hdr.route_hi & ~BIT(31)) !=
                     req_hdr->xd_hdr.route_hi)
                        return false;
                if ((res_hdr->xd_hdr.route_lo) != req_hdr->xd_hdr.route_lo)
                        return false;

                /* Check that the XDomain protocol matches */
                if (!uuid_equal(&res_hdr->uuid, &req_hdr->uuid))
                        return false;

                return true;
        }

        default:
                return false;
        }
}

static bool tb_xdomain_copy(struct tb_cfg_request *req,
                            const struct ctl_pkg *pkg)
{
        memcpy(req->response, pkg->buffer, req->response_size);
        req->result.err = 0;
        return true;
}

static void response_ready(void *data)
{
        tb_cfg_request_put(data);
}

static int __tb_xdomain_response(struct tb_ctl *ctl, const void *response,
                                 size_t size, enum tb_cfg_pkg_type type)
{
        struct tb_cfg_request *req;

        req = tb_cfg_request_alloc();
        if (!req)
                return -ENOMEM;

        req->match = tb_xdomain_match;
        req->copy = tb_xdomain_copy;
        req->request = response;
        req->request_size = size;
        req->request_type = type;

        return tb_cfg_request(ctl, req, response_ready, req);
}

/**
 * tb_xdomain_response() - Send an XDomain response message
 * @xd: XDomain to send the message to
 * @response: Response to send
 * @size: Size of the response
 * @type: PDF type of the response
 *
 * This can be used to send an XDomain response message to the other
 * domain. No response for the message is expected.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
                        size_t size, enum tb_cfg_pkg_type type)
{
        return __tb_xdomain_response(xd->tb->ctl, response, size, type);
}
EXPORT_SYMBOL_GPL(tb_xdomain_response);

static int __tb_xdomain_request(struct tb_ctl *ctl, const void *request,
        size_t request_size, enum tb_cfg_pkg_type request_type, void *response,
        size_t response_size, enum tb_cfg_pkg_type response_type,
        unsigned int timeout_msec)
{
        struct tb_cfg_request *req;
        struct tb_cfg_result res;

        req = tb_cfg_request_alloc();
        if (!req)
                return -ENOMEM;

        req->match = tb_xdomain_match;
        req->copy = tb_xdomain_copy;
        req->request = request;
        req->request_size = request_size;
        req->request_type = request_type;
        req->response = response;
        req->response_size = response_size;
        req->response_type = response_type;

        res = tb_cfg_request_sync(ctl, req, timeout_msec);

        tb_cfg_request_put(req);

        return res.err == 1 ? -EIO : res.err;
}

/**
 * tb_xdomain_request() - Send an XDomain request
 * @xd: XDomain to send the request to
 * @request: Request to send
 * @request_size: Size of the request in bytes
 * @request_type: PDF type of the request
 * @response: Response is copied here
 * @response_size: Expected size of the response in bytes
 * @response_type: Expected PDF type of the response
 * @timeout_msec: Timeout in milliseconds to wait for the response
 *
 * This function can be used to send XDomain control channel messages to
 * the other domain. The function waits until the response is received
 * or the timeout triggers, whichever comes first.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
        size_t request_size, enum tb_cfg_pkg_type request_type,
        void *response, size_t response_size,
        enum tb_cfg_pkg_type response_type, unsigned int timeout_msec)
{
        return __tb_xdomain_request(xd->tb->ctl, request, request_size,
                                    request_type, response, response_size,
                                    response_type, timeout_msec);
}
EXPORT_SYMBOL_GPL(tb_xdomain_request);

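/*
 * Example: a service driver with its own packet format could run a
 * request/response transaction over the control channel like below.
 * This is only a sketch; struct my_svc_request/my_svc_response and the
 * way their headers are filled in are hypothetical and defined by the
 * service protocol in question:
 *
 *      struct my_svc_request req = { ... };
 *      struct my_svc_response res = { };
 *      int ret;
 *
 *      ret = tb_xdomain_request(xd, &req, sizeof(req),
 *                               TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
 *                               TB_CFG_PKG_XDOMAIN_RESP,
 *                               XDOMAIN_DEFAULT_TIMEOUT);
 *      if (ret)
 *              return ret;
 *
 * On success the response packet has been copied to &res.
 */
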
static inline void tb_xdp_fill_header(struct tb_xdp_header *hdr, u64 route,
        u8 sequence, enum tb_xdp_type type, size_t size)
{
        u32 length_sn;

        length_sn = (size - sizeof(hdr->xd_hdr)) / 4;
        length_sn |= (sequence << TB_XDOMAIN_SN_SHIFT) & TB_XDOMAIN_SN_MASK;

        hdr->xd_hdr.route_hi = upper_32_bits(route);
        hdr->xd_hdr.route_lo = lower_32_bits(route);
        hdr->xd_hdr.length_sn = length_sn;
        hdr->type = type;
        memcpy(&hdr->uuid, &tb_xdp_uuid, sizeof(tb_xdp_uuid));
}

static int tb_xdp_handle_error(const struct tb_xdp_header *hdr)
{
        const struct tb_xdp_error_response *error;

        if (hdr->type != ERROR_RESPONSE)
                return 0;

        error = (const struct tb_xdp_error_response *)hdr;

        switch (error->error) {
        case ERROR_UNKNOWN_PACKET:
        case ERROR_UNKNOWN_DOMAIN:
                return -EIO;
        case ERROR_NOT_SUPPORTED:
                return -ENOTSUPP;
        case ERROR_NOT_READY:
                return -EAGAIN;
        default:
                break;
        }

        return 0;
}

static int tb_xdp_error_response(struct tb_ctl *ctl, u64 route, u8 sequence,
                                 enum tb_xdp_error error)
{
        struct tb_xdp_error_response res;

        memset(&res, 0, sizeof(res));
        tb_xdp_fill_header(&res.hdr, route, sequence, ERROR_RESPONSE,
                           sizeof(res));
        res.error = error;

        return __tb_xdomain_response(ctl, &res, sizeof(res),
                                     TB_CFG_PKG_XDOMAIN_RESP);
}

static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route,
        const uuid_t *src_uuid, const uuid_t *dst_uuid, int retry,
        u32 **block, u32 *generation)
{
        struct tb_xdp_properties_response *res;
        struct tb_xdp_properties req;
        u16 data_len, len;
        size_t total_size;
        u32 *data = NULL;
        int ret;

        total_size = sizeof(*res) + TB_XDP_PROPERTIES_MAX_DATA_LENGTH * 4;
        res = kzalloc(total_size, GFP_KERNEL);
        if (!res)
                return -ENOMEM;

        memset(&req, 0, sizeof(req));
        tb_xdp_fill_header(&req.hdr, route, retry % 4, PROPERTIES_REQUEST,
                           sizeof(req));
        memcpy(&req.src_uuid, src_uuid, sizeof(*src_uuid));
        memcpy(&req.dst_uuid, dst_uuid, sizeof(*dst_uuid));

        len = 0;
        data_len = 0;

        do {
                ret = __tb_xdomain_request(ctl, &req, sizeof(req),
                                           TB_CFG_PKG_XDOMAIN_REQ, res,
                                           total_size, TB_CFG_PKG_XDOMAIN_RESP,
                                           XDOMAIN_DEFAULT_TIMEOUT);
                if (ret)
                        goto err;

                ret = tb_xdp_handle_error(&res->hdr);
                if (ret)
                        goto err;

                /*
                 * The packet length includes the whole payload without
                 * the XDomain header. First validate that the packet is
                 * at least the size of the response structure.
                 */
                len = res->hdr.xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
                if (len < sizeof(*res) / 4) {
                        ret = -EINVAL;
                        goto err;
                }

                len += sizeof(res->hdr.xd_hdr) / 4;
                len -= sizeof(*res) / 4;

                if (res->offset != req.offset) {
                        ret = -EINVAL;
                        goto err;
                }

                /*
                 * On the first round allocate a block that has enough
                 * space for the whole properties block.
                 */
                if (!data) {
                        data_len = res->data_length;
                        if (data_len > TB_XDP_PROPERTIES_MAX_LENGTH) {
                                ret = -E2BIG;
                                goto err;
                        }

                        data = kcalloc(data_len, sizeof(u32), GFP_KERNEL);
                        if (!data) {
                                ret = -ENOMEM;
                                goto err;
                        }
                }

                memcpy(data + req.offset, res->data, len * 4);
                req.offset += len;
        } while (!data_len || req.offset < data_len);

        *block = data;
        *generation = res->generation;

        kfree(res);

        return data_len;

err:
        kfree(data);
        kfree(res);

        return ret;
}

static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl,
        u64 route, u8 sequence, const uuid_t *src_uuid,
        const struct tb_xdp_properties *req)
{
        struct tb_xdp_properties_response *res;
        size_t total_size;
        u16 len;
        int ret;

        /*
         * Currently we expect all requests to be directed to us. The
         * protocol supports forwarding, though, for which we might add
         * support later on.
         */
        if (!uuid_equal(src_uuid, &req->dst_uuid)) {
                tb_xdp_error_response(ctl, route, sequence,
                                      ERROR_UNKNOWN_DOMAIN);
                return 0;
        }

        mutex_lock(&xdomain_lock);

        if (req->offset >= xdomain_property_block_len) {
                mutex_unlock(&xdomain_lock);
                return -EINVAL;
        }

        len = xdomain_property_block_len - req->offset;
        len = min_t(u16, len, TB_XDP_PROPERTIES_MAX_DATA_LENGTH);
        total_size = sizeof(*res) + len * 4;

        res = kzalloc(total_size, GFP_KERNEL);
        if (!res) {
                mutex_unlock(&xdomain_lock);
                return -ENOMEM;
        }

        tb_xdp_fill_header(&res->hdr, route, sequence, PROPERTIES_RESPONSE,
                           total_size);
        res->generation = xdomain_property_block_gen;
        res->data_length = xdomain_property_block_len;
        res->offset = req->offset;
        uuid_copy(&res->src_uuid, src_uuid);
        uuid_copy(&res->dst_uuid, &req->src_uuid);
        memcpy(res->data, &xdomain_property_block[req->offset], len * 4);

        mutex_unlock(&xdomain_lock);

        ret = __tb_xdomain_response(ctl, res, total_size,
                                    TB_CFG_PKG_XDOMAIN_RESP);

        kfree(res);
        return ret;
}

static int tb_xdp_properties_changed_request(struct tb_ctl *ctl, u64 route,
                                             int retry, const uuid_t *uuid)
{
        struct tb_xdp_properties_changed_response res;
        struct tb_xdp_properties_changed req;
        int ret;

        memset(&req, 0, sizeof(req));
        tb_xdp_fill_header(&req.hdr, route, retry % 4,
                           PROPERTIES_CHANGED_REQUEST, sizeof(req));
        uuid_copy(&req.src_uuid, uuid);

        memset(&res, 0, sizeof(res));
        ret = __tb_xdomain_request(ctl, &req, sizeof(req),
                                   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
                                   TB_CFG_PKG_XDOMAIN_RESP,
                                   XDOMAIN_DEFAULT_TIMEOUT);
        if (ret)
                return ret;

        return tb_xdp_handle_error(&res.hdr);
}

static int
tb_xdp_properties_changed_response(struct tb_ctl *ctl, u64 route, u8 sequence)
{
        struct tb_xdp_properties_changed_response res;

        memset(&res, 0, sizeof(res));
        tb_xdp_fill_header(&res.hdr, route, sequence,
                           PROPERTIES_CHANGED_RESPONSE, sizeof(res));
        return __tb_xdomain_response(ctl, &res, sizeof(res),
                                     TB_CFG_PKG_XDOMAIN_RESP);
}

/**
 * tb_register_protocol_handler() - Register protocol handler
 * @handler: Handler to register
 *
 * This allows XDomain service drivers to hook into incoming XDomain
 * messages. After this function is called the service driver needs to
 * be able to handle calls to the callback whenever a packet with the
 * registered protocol is received.
 *
 * Return: %0 on success and negative errno on failure
 */
int tb_register_protocol_handler(struct tb_protocol_handler *handler)
{
        if (!handler->uuid || !handler->callback)
                return -EINVAL;
        if (uuid_equal(handler->uuid, &tb_xdp_uuid))
                return -EINVAL;

        mutex_lock(&xdomain_lock);
        list_add_tail(&handler->list, &protocol_handlers);
        mutex_unlock(&xdomain_lock);

        return 0;
}
EXPORT_SYMBOL_GPL(tb_register_protocol_handler);

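/*
 * Example: hooking into packets of a custom protocol. The names below
 * are hypothetical; the callback is invoked from
 * tb_xdomain_handle_request() for every received XDomain packet that
 * carries the matching protocol UUID, and returning a positive value
 * reports the packet as handled:
 *
 *      static int my_proto_callback(const void *buf, size_t size, void *data)
 *      {
 *              ... decode and handle the packet ...
 *              return 1;
 *      }
 *
 *      static struct tb_protocol_handler my_proto_handler = {
 *              .uuid = &my_proto_uuid,
 *              .callback = my_proto_callback,
 *      };
 *
 *      ret = tb_register_protocol_handler(&my_proto_handler);
 */
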
/**
 * tb_unregister_protocol_handler() - Unregister protocol handler
 * @handler: Handler to unregister
 *
 * Removes the previously registered protocol handler.
 */
void tb_unregister_protocol_handler(struct tb_protocol_handler *handler)
{
        mutex_lock(&xdomain_lock);
        list_del_init(&handler->list);
        mutex_unlock(&xdomain_lock);
}
EXPORT_SYMBOL_GPL(tb_unregister_protocol_handler);

static void tb_xdp_handle_request(struct work_struct *work)
{
        struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
        const struct tb_xdp_header *pkg = xw->pkg;
        const struct tb_xdomain_header *xhdr = &pkg->xd_hdr;
        struct tb *tb = xw->tb;
        struct tb_ctl *ctl = tb->ctl;
        const uuid_t *uuid;
        int ret = 0;
        u32 sequence;
        u64 route;

        route = ((u64)xhdr->route_hi << 32 | xhdr->route_lo) & ~BIT_ULL(63);
        sequence = xhdr->length_sn & TB_XDOMAIN_SN_MASK;
        sequence >>= TB_XDOMAIN_SN_SHIFT;

        mutex_lock(&tb->lock);
        if (tb->root_switch)
                uuid = tb->root_switch->uuid;
        else
                uuid = NULL;
        mutex_unlock(&tb->lock);

        if (!uuid) {
                tb_xdp_error_response(ctl, route, sequence, ERROR_NOT_READY);
                goto out;
        }

        switch (pkg->type) {
        case PROPERTIES_REQUEST:
                ret = tb_xdp_properties_response(tb, ctl, route, sequence, uuid,
                        (const struct tb_xdp_properties *)pkg);
                break;

        case PROPERTIES_CHANGED_REQUEST: {
                const struct tb_xdp_properties_changed *xchg =
                        (const struct tb_xdp_properties_changed *)pkg;
                struct tb_xdomain *xd;

                ret = tb_xdp_properties_changed_response(ctl, route, sequence);

                /*
                 * Since the properties have been changed, let's update
                 * the xdomain related to this connection as well in
                 * case there is a change in services it offers.
                 */
                xd = tb_xdomain_find_by_uuid_locked(tb, &xchg->src_uuid);
                if (xd) {
                        queue_delayed_work(tb->wq, &xd->get_properties_work,
                                           msecs_to_jiffies(50));
                        tb_xdomain_put(xd);
                }

                break;
        }

        default:
                break;
        }

        if (ret) {
                tb_warn(tb, "failed to send XDomain response for %#x\n",
                        pkg->type);
        }

out:
        kfree(xw->pkg);
        kfree(xw);
}

static void
tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
                        size_t size)
{
        struct xdomain_request_work *xw;

        xw = kmalloc(sizeof(*xw), GFP_KERNEL);
        if (!xw)
                return;

        INIT_WORK(&xw->work, tb_xdp_handle_request);
        xw->pkg = kmemdup(hdr, size, GFP_KERNEL);
        if (!xw->pkg) {
                kfree(xw);
                return;
        }
        xw->tb = tb;

        queue_work(tb->wq, &xw->work);
}

/**
 * tb_register_service_driver() - Register XDomain service driver
 * @drv: Driver to register
 *
 * Registers a new service driver to the bus.
 */
int tb_register_service_driver(struct tb_service_driver *drv)
{
        drv->driver.bus = &tb_bus_type;
        return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_register_service_driver);

/**
 * tb_unregister_service_driver() - Unregister XDomain service driver
 * @drv: Driver to unregister
 *
 * Unregisters XDomain service driver from the bus.
 */
void tb_unregister_service_driver(struct tb_service_driver *drv)
{
        driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_unregister_service_driver);

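/*
 * Example: a minimal service driver skeleton. Services are matched to
 * drivers by modalias (see get_modalias() below), so the id table
 * names the protocol key and id the driver supports. All names here
 * are hypothetical:
 *
 *      static int my_svc_probe(struct tb_service *svc,
 *                              const struct tb_service_id *id)
 *      {
 *              ... set up the service ...
 *      }
 *
 *      static const struct tb_service_id my_svc_ids[] = {
 *              { TB_SERVICE("network", 1) },
 *              { },
 *      };
 *      MODULE_DEVICE_TABLE(tbsvc, my_svc_ids);
 *
 *      static struct tb_service_driver my_svc_driver = {
 *              .driver.name = "my-svc",
 *              .probe = my_svc_probe,
 *              .id_table = my_svc_ids,
 *      };
 *
 *      module_driver(my_svc_driver, tb_register_service_driver,
 *                    tb_unregister_service_driver);
 */
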
static ssize_t key_show(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        struct tb_service *svc = container_of(dev, struct tb_service, dev);

        /*
         * It should be null terminated but anything else is pretty much
         * allowed.
         */
        return sprintf(buf, "%*pEp\n", (int)strlen(svc->key), svc->key);
}
static DEVICE_ATTR_RO(key);

static int get_modalias(struct tb_service *svc, char *buf, size_t size)
{
        return snprintf(buf, size, "tbsvc:k%sp%08Xv%08Xr%08X", svc->key,
                        svc->prtcid, svc->prtcvers, svc->prtcrevs);
}
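
/*
 * For example, a service with key "network" and prtcid, prtcvers and
 * prtcrevs all set to 1 gets the modalias
 * "tbsvc:knetworkp00000001v00000001r00000001", which is what the id
 * table entries of service drivers are matched against.
 */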

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct tb_service *svc = container_of(dev, struct tb_service, dev);

        /* Full buffer size except new line and null termination */
        get_modalias(svc, buf, PAGE_SIZE - 2);
        return strlen(strcat(buf, "\n"));
}
static DEVICE_ATTR_RO(modalias);

static ssize_t prtcid_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct tb_service *svc = container_of(dev, struct tb_service, dev);

        return sprintf(buf, "%u\n", svc->prtcid);
}
static DEVICE_ATTR_RO(prtcid);

static ssize_t prtcvers_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct tb_service *svc = container_of(dev, struct tb_service, dev);

        return sprintf(buf, "%u\n", svc->prtcvers);
}
static DEVICE_ATTR_RO(prtcvers);

static ssize_t prtcrevs_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct tb_service *svc = container_of(dev, struct tb_service, dev);

        return sprintf(buf, "%u\n", svc->prtcrevs);
}
static DEVICE_ATTR_RO(prtcrevs);

static ssize_t prtcstns_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct tb_service *svc = container_of(dev, struct tb_service, dev);

        return sprintf(buf, "0x%08x\n", svc->prtcstns);
}
static DEVICE_ATTR_RO(prtcstns);

static struct attribute *tb_service_attrs[] = {
        &dev_attr_key.attr,
        &dev_attr_modalias.attr,
        &dev_attr_prtcid.attr,
        &dev_attr_prtcvers.attr,
        &dev_attr_prtcrevs.attr,
        &dev_attr_prtcstns.attr,
        NULL,
};

static struct attribute_group tb_service_attr_group = {
        .attrs = tb_service_attrs,
};

static const struct attribute_group *tb_service_attr_groups[] = {
        &tb_service_attr_group,
        NULL,
};

static int tb_service_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        struct tb_service *svc = container_of(dev, struct tb_service, dev);
        char modalias[64];

        get_modalias(svc, modalias, sizeof(modalias));
        return add_uevent_var(env, "MODALIAS=%s", modalias);
}

static void tb_service_release(struct device *dev)
{
        struct tb_service *svc = container_of(dev, struct tb_service, dev);
        struct tb_xdomain *xd = tb_service_parent(svc);

        ida_simple_remove(&xd->service_ids, svc->id);
        kfree(svc->key);
        kfree(svc);
}

struct device_type tb_service_type = {
        .name = "thunderbolt_service",
        .groups = tb_service_attr_groups,
        .uevent = tb_service_uevent,
        .release = tb_service_release,
};
EXPORT_SYMBOL_GPL(tb_service_type);

static int remove_missing_service(struct device *dev, void *data)
{
        struct tb_xdomain *xd = data;
        struct tb_service *svc;

        svc = tb_to_service(dev);
        if (!svc)
                return 0;

        if (!tb_property_find(xd->properties, svc->key,
                              TB_PROPERTY_TYPE_DIRECTORY))
                device_unregister(dev);

        return 0;
}

static int find_service(struct device *dev, void *data)
{
        const struct tb_property *p = data;
        struct tb_service *svc;

        svc = tb_to_service(dev);
        if (!svc)
                return 0;

        return !strcmp(svc->key, p->key);
}

static int populate_service(struct tb_service *svc,
                            struct tb_property *property)
{
        struct tb_property_dir *dir = property->value.dir;
        struct tb_property *p;

        /* Fill in standard properties */
        p = tb_property_find(dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
        if (p)
                svc->prtcid = p->value.immediate;
        p = tb_property_find(dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
        if (p)
                svc->prtcvers = p->value.immediate;
        p = tb_property_find(dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
        if (p)
                svc->prtcrevs = p->value.immediate;
        p = tb_property_find(dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
        if (p)
                svc->prtcstns = p->value.immediate;

        svc->key = kstrdup(property->key, GFP_KERNEL);
        if (!svc->key)
                return -ENOMEM;

        return 0;
}

static void enumerate_services(struct tb_xdomain *xd)
{
        struct tb_service *svc;
        struct tb_property *p;
        struct device *dev;
        int id;

        /*
         * First remove all services that are not available anymore in
         * the updated property block.
         */
        device_for_each_child_reverse(&xd->dev, xd, remove_missing_service);

        /* Then re-enumerate properties creating new services as we go */
        tb_property_for_each(xd->properties, p) {
                if (p->type != TB_PROPERTY_TYPE_DIRECTORY)
                        continue;

                /* If the service exists already we are fine */
                dev = device_find_child(&xd->dev, p, find_service);
                if (dev) {
                        put_device(dev);
                        continue;
                }

                svc = kzalloc(sizeof(*svc), GFP_KERNEL);
                if (!svc)
                        break;

                if (populate_service(svc, p)) {
                        kfree(svc);
                        break;
                }

                id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
                if (id < 0) {
                        kfree(svc->key);
                        kfree(svc);
                        break;
                }
                svc->id = id;
                svc->dev.bus = &tb_bus_type;
                svc->dev.type = &tb_service_type;
                svc->dev.parent = &xd->dev;
                dev_set_name(&svc->dev, "%s.%d", dev_name(&xd->dev), svc->id);

                if (device_register(&svc->dev)) {
                        put_device(&svc->dev);
                        break;
                }
        }
}

static int populate_properties(struct tb_xdomain *xd,
                               struct tb_property_dir *dir)
{
        const struct tb_property *p;

        /* Required properties */
        p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
        if (!p)
                return -EINVAL;
        xd->device = p->value.immediate;

        p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
        if (!p)
                return -EINVAL;
        xd->vendor = p->value.immediate;

        kfree(xd->device_name);
        xd->device_name = NULL;
        kfree(xd->vendor_name);
        xd->vendor_name = NULL;

        /* Optional properties */
        p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
        if (p)
                xd->device_name = kstrdup(p->value.text, GFP_KERNEL);
        p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
        if (p)
                xd->vendor_name = kstrdup(p->value.text, GFP_KERNEL);

        return 0;
}

/* Called with @xd->lock held */
static void tb_xdomain_restore_paths(struct tb_xdomain *xd)
{
        if (!xd->resume)
                return;

        xd->resume = false;
        if (xd->transmit_path) {
                dev_dbg(&xd->dev, "re-establishing DMA path\n");
                tb_domain_approve_xdomain_paths(xd->tb, xd);
        }
}

static void tb_xdomain_get_properties(struct work_struct *work)
{
        struct tb_xdomain *xd = container_of(work, typeof(*xd),
                                             get_properties_work.work);
        struct tb_property_dir *dir;
        struct tb *tb = xd->tb;
        bool update = false;
        u32 *block = NULL;
        u32 gen = 0;
        int ret;

        ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid,
                                        xd->remote_uuid, xd->properties_retries,
                                        &block, &gen);
        if (ret < 0) {
                if (xd->properties_retries-- > 0) {
                        queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
                                           msecs_to_jiffies(1000));
                } else {
                        /* Give up now */
                        dev_err(&xd->dev,
                                "failed to read XDomain properties from %pUb\n",
                                xd->remote_uuid);
                }
                return;
        }

        xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;

        mutex_lock(&xd->lock);

        /* Only accept newer generation properties */
        if (xd->properties && gen <= xd->property_block_gen) {
                /*
                 * On resume it is likely that the properties block has
                 * not changed (unless the other end added or removed
                 * services). However, we need to make sure the existing
                 * DMA paths are restored properly.
                 */
                tb_xdomain_restore_paths(xd);
                goto err_free_block;
        }

        dir = tb_property_parse_dir(block, ret);
        if (!dir) {
                dev_err(&xd->dev, "failed to parse XDomain properties\n");
                goto err_free_block;
        }

        ret = populate_properties(xd, dir);
        if (ret) {
                dev_err(&xd->dev, "missing XDomain properties in response\n");
                goto err_free_dir;
        }

        /* Release the existing one */
        if (xd->properties) {
                tb_property_free_dir(xd->properties);
                update = true;
        }

        xd->properties = dir;
        xd->property_block_gen = gen;

        tb_xdomain_restore_paths(xd);

        mutex_unlock(&xd->lock);

        kfree(block);

        /*
         * Now the device should be ready enough so we can add it to the
         * bus and let userspace know about it. If the device is already
         * registered, we notify the userspace that it has changed.
         */
        if (!update) {
                if (device_add(&xd->dev)) {
                        dev_err(&xd->dev, "failed to add XDomain device\n");
                        return;
                }
        } else {
                kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
        }

        enumerate_services(xd);
        return;

err_free_dir:
        tb_property_free_dir(dir);
err_free_block:
        kfree(block);
        mutex_unlock(&xd->lock);
}

static void tb_xdomain_properties_changed(struct work_struct *work)
{
        struct tb_xdomain *xd = container_of(work, typeof(*xd),
                                             properties_changed_work.work);
        int ret;

        ret = tb_xdp_properties_changed_request(xd->tb->ctl, xd->route,
                                xd->properties_changed_retries, xd->local_uuid);
        if (ret) {
                if (xd->properties_changed_retries-- > 0)
                        queue_delayed_work(xd->tb->wq,
                                           &xd->properties_changed_work,
                                           msecs_to_jiffies(1000));
                return;
        }

        xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
}

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

        return sprintf(buf, "%#x\n", xd->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
        int ret;

        if (mutex_lock_interruptible(&xd->lock))
                return -ERESTARTSYS;
        ret = sprintf(buf, "%s\n", xd->device_name ? xd->device_name : "");
        mutex_unlock(&xd->lock);

        return ret;
}
static DEVICE_ATTR_RO(device_name);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

        return sprintf(buf, "%#x\n", xd->vendor);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
        int ret;

        if (mutex_lock_interruptible(&xd->lock))
                return -ERESTARTSYS;
        ret = sprintf(buf, "%s\n", xd->vendor_name ? xd->vendor_name : "");
        mutex_unlock(&xd->lock);

        return ret;
}
static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
                              char *buf)
{
        struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

        return sprintf(buf, "%pUb\n", xd->remote_uuid);
}
static DEVICE_ATTR_RO(unique_id);

static struct attribute *xdomain_attrs[] = {
        &dev_attr_device.attr,
        &dev_attr_device_name.attr,
        &dev_attr_unique_id.attr,
        &dev_attr_vendor.attr,
        &dev_attr_vendor_name.attr,
        NULL,
};

static struct attribute_group xdomain_attr_group = {
        .attrs = xdomain_attrs,
};

static const struct attribute_group *xdomain_attr_groups[] = {
        &xdomain_attr_group,
        NULL,
};

static void tb_xdomain_release(struct device *dev)
{
        struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);

        put_device(xd->dev.parent);

        tb_property_free_dir(xd->properties);
        ida_destroy(&xd->service_ids);

        kfree(xd->local_uuid);
        kfree(xd->remote_uuid);
        kfree(xd->device_name);
        kfree(xd->vendor_name);
        kfree(xd);
}

static void start_handshake(struct tb_xdomain *xd)
{
        xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
        xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;

        /* Start exchanging properties with the other host */
        queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
                           msecs_to_jiffies(100));
        queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
                           msecs_to_jiffies(1000));
}

static void stop_handshake(struct tb_xdomain *xd)
{
        xd->properties_retries = 0;
        xd->properties_changed_retries = 0;

        cancel_delayed_work_sync(&xd->get_properties_work);
        cancel_delayed_work_sync(&xd->properties_changed_work);
}

static int __maybe_unused tb_xdomain_suspend(struct device *dev)
{
        stop_handshake(tb_to_xdomain(dev));
        return 0;
}

static int __maybe_unused tb_xdomain_resume(struct device *dev)
{
        struct tb_xdomain *xd = tb_to_xdomain(dev);

        /*
         * Ask tb_xdomain_get_properties() to restore any existing DMA
         * paths after the properties are re-read.
         */
        xd->resume = true;
        start_handshake(xd);

        return 0;
}

static const struct dev_pm_ops tb_xdomain_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(tb_xdomain_suspend, tb_xdomain_resume)
};

struct device_type tb_xdomain_type = {
        .name = "thunderbolt_xdomain",
        .release = tb_xdomain_release,
        .pm = &tb_xdomain_pm_ops,
};
EXPORT_SYMBOL_GPL(tb_xdomain_type);

/**
 * tb_xdomain_alloc() - Allocate new XDomain object
 * @tb: Domain where the XDomain belongs
 * @parent: Parent device (the switch through which the connection to
 *          the other domain is reached)
 * @route: Route string used to reach the other domain
 * @local_uuid: Our local domain UUID
 * @remote_uuid: UUID of the other domain
 *
 * Allocates a new XDomain structure and returns a pointer to it. The
 * object must be released by calling tb_xdomain_put().
 */
struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
                                    u64 route, const uuid_t *local_uuid,
                                    const uuid_t *remote_uuid)
{
        struct tb_xdomain *xd;

        xd = kzalloc(sizeof(*xd), GFP_KERNEL);
        if (!xd)
                return NULL;

        xd->tb = tb;
        xd->route = route;
        ida_init(&xd->service_ids);
        mutex_init(&xd->lock);
        INIT_DELAYED_WORK(&xd->get_properties_work, tb_xdomain_get_properties);
        INIT_DELAYED_WORK(&xd->properties_changed_work,
                          tb_xdomain_properties_changed);

        xd->local_uuid = kmemdup(local_uuid, sizeof(uuid_t), GFP_KERNEL);
        if (!xd->local_uuid)
                goto err_free;

        xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t), GFP_KERNEL);
        if (!xd->remote_uuid)
                goto err_free_local_uuid;

        device_initialize(&xd->dev);
        xd->dev.parent = get_device(parent);
        xd->dev.bus = &tb_bus_type;
        xd->dev.type = &tb_xdomain_type;
        xd->dev.groups = xdomain_attr_groups;
        dev_set_name(&xd->dev, "%u-%llx", tb->index, route);

        /*
         * This keeps the DMA powered on as long as we have an active
         * connection to another host.
         */
        pm_runtime_set_active(&xd->dev);
        pm_runtime_get_noresume(&xd->dev);
        pm_runtime_enable(&xd->dev);

        return xd;

err_free_local_uuid:
        kfree(xd->local_uuid);
err_free:
        kfree(xd);

        return NULL;
}

/**
 * tb_xdomain_add() - Add XDomain to the bus
 * @xd: XDomain to add
 *
 * This function starts XDomain discovery protocol handshake and
 * eventually adds the XDomain to the bus. After calling this function
 * the caller needs to call tb_xdomain_remove() in order to remove and
 * release the object regardless of whether the handshake succeeded or
 * not.
 */
void tb_xdomain_add(struct tb_xdomain *xd)
{
        /* Start exchanging properties with the other host */
        start_handshake(xd);
}

static int unregister_service(struct device *dev, void *data)
{
        device_unregister(dev);
        return 0;
}

/**
 * tb_xdomain_remove() - Remove XDomain from the bus
 * @xd: XDomain to remove
 *
 * This will stop all ongoing configuration work and remove the XDomain
 * along with any services from the bus. When the last reference to @xd
 * is released the object will be released as well.
 */
void tb_xdomain_remove(struct tb_xdomain *xd)
{
        stop_handshake(xd);

        device_for_each_child_reverse(&xd->dev, xd, unregister_service);

        /*
         * Undo runtime PM here explicitly because it is possible that
         * the XDomain was never added to the bus and thus device_del()
         * is not called for it (device_del() would handle this otherwise).
         */
        pm_runtime_disable(&xd->dev);
        pm_runtime_put_noidle(&xd->dev);
        pm_runtime_set_suspended(&xd->dev);

        if (!device_is_registered(&xd->dev))
                put_device(&xd->dev);
        else
                device_unregister(&xd->dev);
}

/**
 * tb_xdomain_enable_paths() - Enable DMA paths for XDomain connection
 * @xd: XDomain connection
 * @transmit_path: HopID of the transmit path the other end is using to
 *                 send packets
 * @transmit_ring: DMA ring used to receive packets from the other end
 * @receive_path: HopID of the receive path the other end is using to
 *                receive packets
 * @receive_ring: DMA ring used to send packets to the other end
 *
 * The function enables DMA paths accordingly so that after successful
 * return the caller can send and receive packets using high-speed DMA
 * path.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path,
                            u16 transmit_ring, u16 receive_path,
                            u16 receive_ring)
{
        int ret;

        mutex_lock(&xd->lock);

        if (xd->transmit_path) {
                ret = xd->transmit_path == transmit_path ? 0 : -EBUSY;
                goto exit_unlock;
        }

        xd->transmit_path = transmit_path;
        xd->transmit_ring = transmit_ring;
        xd->receive_path = receive_path;
        xd->receive_ring = receive_ring;

        ret = tb_domain_approve_xdomain_paths(xd->tb, xd);

exit_unlock:
        mutex_unlock(&xd->lock);

        return ret;
}
EXPORT_SYMBOL_GPL(tb_xdomain_enable_paths);

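/*
 * Example: a service driver would typically allocate its RX/TX rings
 * with tb_ring_alloc_rx()/tb_ring_alloc_tx() first and agree on the
 * HopIDs with the remote end over the control channel. A minimal
 * sketch, where tx_ring/rx_ring and the path values are assumptions
 * of the service protocol:
 *
 *      ret = tb_xdomain_enable_paths(xd, transmit_path, tx_ring->hop,
 *                                    receive_path, rx_ring->hop);
 *      if (ret)
 *              goto err_free_rings;
 *
 * The paths are torn down again with tb_xdomain_disable_paths() when
 * the connection is closed.
 */
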
/**
 * tb_xdomain_disable_paths() - Disable DMA paths for XDomain connection
 * @xd: XDomain connection
 *
 * This does the opposite of tb_xdomain_enable_paths(). After a call to
 * this function the caller is not expected to use the rings anymore.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_xdomain_disable_paths(struct tb_xdomain *xd)
{
        int ret = 0;

        mutex_lock(&xd->lock);
        if (xd->transmit_path) {
                xd->transmit_path = 0;
                xd->transmit_ring = 0;
                xd->receive_path = 0;
                xd->receive_ring = 0;

                ret = tb_domain_disconnect_xdomain_paths(xd->tb, xd);
        }
        mutex_unlock(&xd->lock);

        return ret;
}
EXPORT_SYMBOL_GPL(tb_xdomain_disable_paths);

struct tb_xdomain_lookup {
        const uuid_t *uuid;
        u8 link;
        u8 depth;
        u64 route;
};

static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
        const struct tb_xdomain_lookup *lookup)
{
        int i;

        for (i = 1; i <= sw->config.max_port_number; i++) {
                struct tb_port *port = &sw->ports[i];
                struct tb_xdomain *xd;

                if (tb_is_upstream_port(port))
                        continue;

                if (port->xdomain) {
                        xd = port->xdomain;

                        if (lookup->uuid) {
                                if (uuid_equal(xd->remote_uuid, lookup->uuid))
                                        return xd;
                        } else if (lookup->link &&
                                   lookup->link == xd->link &&
                                   lookup->depth == xd->depth) {
                                return xd;
                        } else if (lookup->route &&
                                   lookup->route == xd->route) {
                                return xd;
                        }
                } else if (port->remote) {
                        xd = switch_find_xdomain(port->remote->sw, lookup);
                        if (xd)
                                return xd;
                }
        }

        return NULL;
}

/**
 * tb_xdomain_find_by_uuid() - Find an XDomain by UUID
 * @tb: Domain where the XDomain belongs to
 * @uuid: UUID to look for
 *
 * Finds XDomain by walking through the Thunderbolt topology below @tb.
 * The returned XDomain will have its reference count increased so the
 * caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
        struct tb_xdomain_lookup lookup;
        struct tb_xdomain *xd;

        memset(&lookup, 0, sizeof(lookup));
        lookup.uuid = uuid;

        xd = switch_find_xdomain(tb->root_switch, &lookup);
        return tb_xdomain_get(xd);
}
EXPORT_SYMBOL_GPL(tb_xdomain_find_by_uuid);

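/*
 * Example: since the lookup walks the topology, the domain lock must
 * be held. A caller that does not already hold it could do (the
 * tb_xdomain_find_by_uuid_locked() helper used above wraps exactly
 * this pattern):
 *
 *      struct tb_xdomain *xd;
 *
 *      mutex_lock(&tb->lock);
 *      xd = tb_xdomain_find_by_uuid(tb, uuid);
 *      mutex_unlock(&tb->lock);
 *
 *      if (xd) {
 *              ... use the XDomain ...
 *              tb_xdomain_put(xd);
 *      }
 */
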
/**
 * tb_xdomain_find_by_link_depth() - Find an XDomain by link and depth
 * @tb: Domain where the XDomain belongs to
 * @link: Root switch link number
 * @depth: Depth in the link
 *
 * Finds XDomain by walking through the Thunderbolt topology below @tb.
 * The returned XDomain will have its reference count increased so the
 * caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
                                                 u8 depth)
{
        struct tb_xdomain_lookup lookup;
        struct tb_xdomain *xd;

        memset(&lookup, 0, sizeof(lookup));
        lookup.link = link;
        lookup.depth = depth;

        xd = switch_find_xdomain(tb->root_switch, &lookup);
        return tb_xdomain_get(xd);
}

/**
 * tb_xdomain_find_by_route() - Find an XDomain by route string
 * @tb: Domain where the XDomain belongs to
 * @route: XDomain route string
 *
 * Finds XDomain by walking through the Thunderbolt topology below @tb.
 * The returned XDomain will have its reference count increased so the
 * caller needs to call tb_xdomain_put() when it is done with the
 * object.
 *
 * This will find all XDomains including the ones that are not yet added
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 */
struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route)
{
        struct tb_xdomain_lookup lookup;
        struct tb_xdomain *xd;

        memset(&lookup, 0, sizeof(lookup));
        lookup.route = route;

        xd = switch_find_xdomain(tb->root_switch, &lookup);
        return tb_xdomain_get(xd);
}
EXPORT_SYMBOL_GPL(tb_xdomain_find_by_route);

bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
                               const void *buf, size_t size)
{
        const struct tb_protocol_handler *handler, *tmp;
        const struct tb_xdp_header *hdr = buf;
        unsigned int length;
        int ret = 0;

        /* We expect the packet to be at least the size of the header */
        length = hdr->xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
        if (length != size / 4 - sizeof(hdr->xd_hdr) / 4)
                return true;
        if (length < sizeof(*hdr) / 4 - sizeof(hdr->xd_hdr) / 4)
                return true;

        /*
         * Handle XDomain discovery protocol packets directly here. For
         * other protocols (based on their UUID) we call registered
         * handlers in turn.
         */
        if (uuid_equal(&hdr->uuid, &tb_xdp_uuid)) {
                if (type == TB_CFG_PKG_XDOMAIN_REQ) {
                        tb_xdp_schedule_request(tb, hdr, size);
                        return true;
                }
                return false;
        }

        mutex_lock(&xdomain_lock);
        list_for_each_entry_safe(handler, tmp, &protocol_handlers, list) {
                if (!uuid_equal(&hdr->uuid, handler->uuid))
                        continue;

                mutex_unlock(&xdomain_lock);
                ret = handler->callback(buf, size, handler->data);
                mutex_lock(&xdomain_lock);

                if (ret)
                        break;
        }
        mutex_unlock(&xdomain_lock);

        return ret > 0;
}

static int rebuild_property_block(void)
{
        u32 *block, len;
        int ret;

        ret = tb_property_format_dir(xdomain_property_dir, NULL, 0);
        if (ret < 0)
                return ret;

        len = ret;

        block = kcalloc(len, sizeof(u32), GFP_KERNEL);
        if (!block)
                return -ENOMEM;

        ret = tb_property_format_dir(xdomain_property_dir, block, len);
        if (ret) {
                kfree(block);
                return ret;
        }

        kfree(xdomain_property_block);
        xdomain_property_block = block;
        xdomain_property_block_len = len;
        xdomain_property_block_gen++;

        return 0;
}

static int update_xdomain(struct device *dev, void *data)
{
        struct tb_xdomain *xd;

        xd = tb_to_xdomain(dev);
        if (xd) {
                queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
                                   msecs_to_jiffies(50));
        }

        return 0;
}

static void update_all_xdomains(void)
{
        bus_for_each_dev(&tb_bus_type, NULL, NULL, update_xdomain);
}

static bool remove_directory(const char *key, const struct tb_property_dir *dir)
{
        struct tb_property *p;

        p = tb_property_find(xdomain_property_dir, key,
                             TB_PROPERTY_TYPE_DIRECTORY);
        if (p && p->value.dir == dir) {
                tb_property_remove(p);
                return true;
        }
        return false;
}

/**
 * tb_register_property_dir() - Register property directory to the host
 * @key: Key (name) of the directory to add
 * @dir: Directory to add
 *
 * Service drivers can use this function to add a new property
 * directory to the properties advertised by this host. The other
 * connected hosts are notified so they can re-read the properties of
 * this host if they are interested.
 *
 * Return: %0 on success and negative errno on failure
 */
int tb_register_property_dir(const char *key, struct tb_property_dir *dir)
{
        int ret;

        if (WARN_ON(!xdomain_property_dir))
                return -EAGAIN;

        if (!key || strlen(key) > 8)
                return -EINVAL;

        mutex_lock(&xdomain_lock);
        if (tb_property_find(xdomain_property_dir, key,
                             TB_PROPERTY_TYPE_DIRECTORY)) {
                ret = -EEXIST;
                goto err_unlock;
        }

        ret = tb_property_add_dir(xdomain_property_dir, key, dir);
        if (ret)
                goto err_unlock;

        ret = rebuild_property_block();
        if (ret) {
                remove_directory(key, dir);
                goto err_unlock;
        }

        mutex_unlock(&xdomain_lock);
        update_all_xdomains();
        return 0;

err_unlock:
        mutex_unlock(&xdomain_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(tb_register_property_dir);

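/*
 * Example: exporting a service directory so that remote hosts can
 * discover the service. This mirrors the standard properties set up
 * in tb_xdomain_init() below; the key, UUID and values here are
 * hypothetical:
 *
 *      struct tb_property_dir *dir;
 *      int ret;
 *
 *      dir = tb_property_create_dir(&my_svc_dir_uuid);
 *      if (!dir)
 *              return -ENOMEM;
 *      tb_property_add_immediate(dir, "prtcid", 1);
 *      tb_property_add_immediate(dir, "prtcvers", 1);
 *      tb_property_add_immediate(dir, "prtcrevs", 1);
 *      tb_property_add_immediate(dir, "prtcstns", 0);
 *
 *      ret = tb_register_property_dir("network", dir);
 *      if (ret)
 *              tb_property_free_dir(dir);
 *
 * Note that the key must be at most 8 characters long.
 */
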
/**
 * tb_unregister_property_dir() - Removes property directory from host
 * @key: Key (name) of the directory
 * @dir: Directory to remove
 *
 * This will remove the existing directory from this host and notify the
 * connected hosts about the change.
 */
void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir)
{
        int ret = 0;

        mutex_lock(&xdomain_lock);
        if (remove_directory(key, dir))
                ret = rebuild_property_block();
        mutex_unlock(&xdomain_lock);

        if (!ret)
                update_all_xdomains();
}
EXPORT_SYMBOL_GPL(tb_unregister_property_dir);

int tb_xdomain_init(void)
{
        int ret;

        xdomain_property_dir = tb_property_create_dir(NULL);
        if (!xdomain_property_dir)
                return -ENOMEM;

        /*
         * Initialize standard set of properties without any service
         * directories. Those will be added by service drivers
         * themselves when they are loaded.
         */
        tb_property_add_immediate(xdomain_property_dir, "vendorid",
                                  PCI_VENDOR_ID_INTEL);
        tb_property_add_text(xdomain_property_dir, "vendorid", "Intel Corp.");
        tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
        tb_property_add_text(xdomain_property_dir, "deviceid",
                             utsname()->nodename);
        tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);

        ret = rebuild_property_block();
        if (ret) {
                tb_property_free_dir(xdomain_property_dir);
                xdomain_property_dir = NULL;
        }

        return ret;
}

void tb_xdomain_exit(void)
{
        kfree(xdomain_property_block);
        tb_property_free_dir(xdomain_property_dir);
}