linux/include/linux/thunderbolt.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Thunderbolt service API
 *
 * Copyright (C) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#ifndef THUNDERBOLT_H_
#define THUNDERBOLT_H_

#include <linux/device.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/mod_devicetable.h>
#include <linux/pci.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

enum tb_cfg_pkg_type {
        TB_CFG_PKG_READ = 1,
        TB_CFG_PKG_WRITE = 2,
        TB_CFG_PKG_ERROR = 3,
        TB_CFG_PKG_NOTIFY_ACK = 4,
        TB_CFG_PKG_EVENT = 5,
        TB_CFG_PKG_XDOMAIN_REQ = 6,
        TB_CFG_PKG_XDOMAIN_RESP = 7,
        TB_CFG_PKG_OVERRIDE = 8,
        TB_CFG_PKG_RESET = 9,
        TB_CFG_PKG_ICM_EVENT = 10,
        TB_CFG_PKG_ICM_CMD = 11,
        TB_CFG_PKG_ICM_RESP = 12,
        TB_CFG_PKG_PREPARE_TO_SLEEP = 13,
};

/**
 * enum tb_security_level - Thunderbolt security level
 * @TB_SECURITY_NONE: No security, legacy mode
 * @TB_SECURITY_USER: User approval required at minimum
 * @TB_SECURITY_SECURE: One time saved key required at minimum
 * @TB_SECURITY_DPONLY: Only tunnel Display port (and USB)
 * @TB_SECURITY_USBONLY: Only tunnel USB controller of the connected
 *                       Thunderbolt dock (and Display Port). All PCIe
 *                       links downstream of the dock are removed.
 */
enum tb_security_level {
        TB_SECURITY_NONE,
        TB_SECURITY_USER,
        TB_SECURITY_SECURE,
        TB_SECURITY_DPONLY,
        TB_SECURITY_USBONLY,
};

/**
 * struct tb - main thunderbolt bus structure
 * @dev: Domain device
 * @lock: Big lock. Must be held when accessing any struct
 *        tb_switch / struct tb_port.
 * @nhi: Pointer to the NHI structure
 * @ctl: Control channel for this domain
 * @wq: Ordered workqueue for all domain specific work
 * @root_switch: Root switch of this domain
 * @cm_ops: Connection manager specific operations vector
 * @index: Linux assigned domain number
 * @security_level: Current security level
 * @nboot_acl: Number of boot ACLs the domain supports
 * @privdata: Private connection manager specific data
 */
struct tb {
        struct device dev;
        struct mutex lock;
        struct tb_nhi *nhi;
        struct tb_ctl *ctl;
        struct workqueue_struct *wq;
        struct tb_switch *root_switch;
        const struct tb_cm_ops *cm_ops;
        int index;
        enum tb_security_level security_level;
        size_t nboot_acl;
        unsigned long privdata[0];
};

extern struct bus_type tb_bus_type;
extern struct device_type tb_service_type;
extern struct device_type tb_xdomain_type;

#define TB_LINKS_PER_PHY_PORT   2

static inline unsigned int tb_phy_port_from_link(unsigned int link)
{
        return (link - 1) / TB_LINKS_PER_PHY_PORT;
}
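
/*
 * Links are numbered starting from 1 and each physical port carries
 * TB_LINKS_PER_PHY_PORT links, so for example tb_phy_port_from_link(1)
 * and tb_phy_port_from_link(2) both return physical port 0, and
 * tb_phy_port_from_link(3) returns physical port 1.
 */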

/**
 * struct tb_property_dir - XDomain property directory
 * @uuid: Directory UUID or %NULL if root directory
 * @properties: List of properties in this directory
 *
 * The user needs to provide serialization if needed.
 */
struct tb_property_dir {
        const uuid_t *uuid;
        struct list_head properties;
};

enum tb_property_type {
        TB_PROPERTY_TYPE_UNKNOWN = 0x00,
        TB_PROPERTY_TYPE_DIRECTORY = 0x44,
        TB_PROPERTY_TYPE_DATA = 0x64,
        TB_PROPERTY_TYPE_TEXT = 0x74,
        TB_PROPERTY_TYPE_VALUE = 0x76,
};

#define TB_PROPERTY_KEY_SIZE    8

/**
 * struct tb_property - XDomain property
 * @list: Used to link properties together in a directory
 * @key: Key for the property (always '\0' terminated)
 * @type: Type of the property
 * @length: Length of the property data in dwords
 * @value: Property value
 *
 * Users use @type to determine which field in @value is filled.
 */
struct tb_property {
        struct list_head list;
        char key[TB_PROPERTY_KEY_SIZE + 1];
        enum tb_property_type type;
        size_t length;
        union {
                struct tb_property_dir *dir;
                u8 *data;
                char *text;
                u32 immediate;
        } value;
};

struct tb_property_dir *tb_property_parse_dir(const u32 *block,
                                              size_t block_len);
ssize_t tb_property_format_dir(const struct tb_property_dir *dir, u32 *block,
                               size_t block_len);
struct tb_property_dir *tb_property_create_dir(const uuid_t *uuid);
void tb_property_free_dir(struct tb_property_dir *dir);
int tb_property_add_immediate(struct tb_property_dir *parent, const char *key,
                              u32 value);
int tb_property_add_data(struct tb_property_dir *parent, const char *key,
                         const void *buf, size_t buflen);
int tb_property_add_text(struct tb_property_dir *parent, const char *key,
                         const char *text);
int tb_property_add_dir(struct tb_property_dir *parent, const char *key,
                        struct tb_property_dir *dir);
void tb_property_remove(struct tb_property *tb_property);
struct tb_property *tb_property_find(struct tb_property_dir *dir,
                        const char *key, enum tb_property_type type);
struct tb_property *tb_property_get_next(struct tb_property_dir *dir,
                                         struct tb_property *prev);

#define tb_property_for_each(dir, property)                     \
        for (property = tb_property_get_next(dir, NULL);        \
             property;                                          \
             property = tb_property_get_next(dir, property))

int tb_register_property_dir(const char *key, struct tb_property_dir *dir);
void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
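
/*
 * A minimal sketch of exposing local properties to remote domains,
 * assuming example_dir_uuid is a uuid_t identifying the directory and
 * the key "example" is made up for illustration (keys are at most
 * TB_PROPERTY_KEY_SIZE characters):
 *
 *      static struct tb_property_dir *dir;
 *
 *      dir = tb_property_create_dir(&example_dir_uuid);
 *      if (!dir)
 *              return -ENOMEM;
 *      if (tb_property_add_immediate(dir, "prtcid", 1) ||
 *          tb_property_add_text(dir, "device", "Example") ||
 *          tb_register_property_dir("example", dir)) {
 *              tb_property_free_dir(dir);
 *              return -ENOMEM;
 *      }
 *
 * On teardown the directory is removed with
 * tb_unregister_property_dir("example", dir) followed by
 * tb_property_free_dir(dir).
 */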

/**
 * struct tb_xdomain - Cross-domain (XDomain) connection
 * @dev: XDomain device
 * @tb: Pointer to the domain
 * @remote_uuid: UUID of the remote domain (host)
 * @local_uuid: Cached local UUID
 * @route: Route string with which the other domain can be reached
 * @vendor: Vendor ID of the remote domain
 * @device: Device ID of the remote domain
 * @lock: Lock to serialize access to the following fields of this structure
 * @vendor_name: Name of the vendor (or %NULL if not known)
 * @device_name: Name of the device (or %NULL if not known)
 * @is_unplugged: The XDomain is unplugged
 * @resume: The XDomain is being resumed
 * @needs_uuid: If the XDomain does not have @remote_uuid it will be
 *              queried first
 * @transmit_path: HopID which the remote end expects us to transmit
 * @transmit_ring: Local ring (hop) where outgoing packets are pushed
 * @receive_path: HopID which we expect the remote end to transmit
 * @receive_ring: Local ring (hop) where incoming packets arrive
 * @service_ids: Used to generate IDs for the services
 * @properties: Properties exported by the remote domain
 * @property_block_gen: Generation of @properties
 * @get_uuid_work: Work used to retrieve @remote_uuid
 * @uuid_retries: Number of times left to request @remote_uuid before
 *                giving up
 * @get_properties_work: Work used to get remote domain properties
 * @properties_retries: Number of times left to read properties
 * @properties_changed_work: Work used to notify the remote domain that
 *                           our properties have changed
 * @properties_changed_retries: Number of times left to send properties
 *                              changed notification
 * @link: Root switch link the remote domain is connected to (ICM only)
 * @depth: Depth in the chain where the remote domain is connected (ICM only)
 *
 * This structure represents a connection between two domains (hosts).
 * Each XDomain contains zero or more services which are exposed as
 * &struct tb_service objects.
 *
 * Service drivers may access this structure if they need to enumerate
 * non-standard properties but they need to hold @lock when doing so
 * because properties can be changed asynchronously in response to
 * changes in the remote domain.
 */
struct tb_xdomain {
        struct device dev;
        struct tb *tb;
        uuid_t *remote_uuid;
        const uuid_t *local_uuid;
        u64 route;
        u16 vendor;
        u16 device;
        struct mutex lock;
        const char *vendor_name;
        const char *device_name;
        bool is_unplugged;
        bool resume;
        bool needs_uuid;
        u16 transmit_path;
        u16 transmit_ring;
        u16 receive_path;
        u16 receive_ring;
        struct ida service_ids;
        struct tb_property_dir *properties;
        u32 property_block_gen;
        struct delayed_work get_uuid_work;
        int uuid_retries;
        struct delayed_work get_properties_work;
        int properties_retries;
        struct delayed_work properties_changed_work;
        int properties_changed_retries;
        u8 link;
        u8 depth;
};

int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path,
                            u16 transmit_ring, u16 receive_path,
                            u16 receive_ring);
int tb_xdomain_disable_paths(struct tb_xdomain *xd);
struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid);
struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route);

static inline struct tb_xdomain *
tb_xdomain_find_by_uuid_locked(struct tb *tb, const uuid_t *uuid)
{
        struct tb_xdomain *xd;

        mutex_lock(&tb->lock);
        xd = tb_xdomain_find_by_uuid(tb, uuid);
        mutex_unlock(&tb->lock);

        return xd;
}

static inline struct tb_xdomain *
tb_xdomain_find_by_route_locked(struct tb *tb, u64 route)
{
        struct tb_xdomain *xd;

        mutex_lock(&tb->lock);
        xd = tb_xdomain_find_by_route(tb, route);
        mutex_unlock(&tb->lock);

        return xd;
}

static inline struct tb_xdomain *tb_xdomain_get(struct tb_xdomain *xd)
{
        if (xd)
                get_device(&xd->dev);
        return xd;
}

static inline void tb_xdomain_put(struct tb_xdomain *xd)
{
        if (xd)
                put_device(&xd->dev);
}
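
/*
 * The lookup functions above return the XDomain with a reference held;
 * a short sketch of typical use, releasing the reference with
 * tb_xdomain_put() when done:
 *
 *      struct tb_xdomain *xd;
 *
 *      xd = tb_xdomain_find_by_uuid_locked(tb, uuid);
 *      if (xd) {
 *              ... use xd ...
 *              tb_xdomain_put(xd);
 *      }
 */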

static inline bool tb_is_xdomain(const struct device *dev)
{
        return dev->type == &tb_xdomain_type;
}

static inline struct tb_xdomain *tb_to_xdomain(struct device *dev)
{
        if (tb_is_xdomain(dev))
                return container_of(dev, struct tb_xdomain, dev);
        return NULL;
}

int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
                        size_t size, enum tb_cfg_pkg_type type);
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
                       size_t request_size, enum tb_cfg_pkg_type request_type,
                       void *response, size_t response_size,
                       enum tb_cfg_pkg_type response_type,
                       unsigned int timeout_msec);
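
/*
 * A sketch of a synchronous XDomain exchange. The wire format of the
 * messages is protocol specific, so struct example_req/example_resp
 * below are hypothetical placeholders:
 *
 *      struct example_req req = { ... };
 *      struct example_resp resp;
 *      int ret;
 *
 *      ret = tb_xdomain_request(xd, &req, sizeof(req),
 *                               TB_CFG_PKG_XDOMAIN_REQ,
 *                               &resp, sizeof(resp),
 *                               TB_CFG_PKG_XDOMAIN_RESP, 1000);
 *      if (ret)
 *              return ret;
 */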

/**
 * struct tb_protocol_handler - Protocol specific handler
 * @uuid: XDomain messages with this UUID are dispatched to this handler
 * @callback: Callback called with the XDomain message. Returning %1
 *            here tells the XDomain core that the message was handled
 *            by this handler and should not be forwarded to other
 *            handlers.
 * @data: Data passed with the callback
 * @list: Handlers are linked using this
 *
 * Thunderbolt services can hook into incoming XDomain requests by
 * registering a protocol handler. The only limitation is that the
 * XDomain discovery protocol UUID cannot be registered since it is
 * handled by the core XDomain code.
 *
 * The @callback must check that the message is really directed to the
 * service the driver implements.
 */
struct tb_protocol_handler {
        const uuid_t *uuid;
        int (*callback)(const void *buf, size_t size, void *data);
        void *data;
        struct list_head list;
};

int tb_register_protocol_handler(struct tb_protocol_handler *handler);
void tb_unregister_protocol_handler(struct tb_protocol_handler *handler);
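
/*
 * A minimal sketch of a protocol handler registration; the UUID, the
 * callback and message_is_for_us() below are illustrative. The
 * callback must verify that the message really is meant for this
 * service before returning %1:
 *
 *      static int example_cb(const void *buf, size_t size, void *data)
 *      {
 *              if (!message_is_for_us(buf, size))  // hypothetical check
 *                      return 0;
 *              // handle the message
 *              return 1;
 *      }
 *
 *      static struct tb_protocol_handler example_handler = {
 *              .uuid = &example_proto_uuid,
 *              .callback = example_cb,
 *      };
 *
 *      tb_register_protocol_handler(&example_handler);
 */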

/**
 * struct tb_service - Thunderbolt service
 * @dev: XDomain device
 * @id: ID of the service (shown in sysfs)
 * @key: Protocol key from the properties directory
 * @prtcid: Protocol ID from the properties directory
 * @prtcvers: Protocol version from the properties directory
 * @prtcrevs: Protocol software revision from the properties directory
 * @prtcstns: Protocol settings mask from the properties directory
 *
 * Each domain exposes a set of services it supports as a collection of
 * properties. For each service there will be one corresponding
 * &struct tb_service. Service drivers are bound to these.
 */
struct tb_service {
        struct device dev;
        int id;
        const char *key;
        u32 prtcid;
        u32 prtcvers;
        u32 prtcrevs;
        u32 prtcstns;
};

static inline struct tb_service *tb_service_get(struct tb_service *svc)
{
        if (svc)
                get_device(&svc->dev);
        return svc;
}

static inline void tb_service_put(struct tb_service *svc)
{
        if (svc)
                put_device(&svc->dev);
}

static inline bool tb_is_service(const struct device *dev)
{
        return dev->type == &tb_service_type;
}

static inline struct tb_service *tb_to_service(struct device *dev)
{
        if (tb_is_service(dev))
                return container_of(dev, struct tb_service, dev);
        return NULL;
}

/**
 * struct tb_service_driver - Thunderbolt service driver
 * @driver: Driver structure
 * @probe: Called when the driver is probed
 * @remove: Called when the driver is removed (optional)
 * @shutdown: Called at shutdown time to stop the service (optional)
 * @id_table: Table of service identifiers the driver supports
 */
struct tb_service_driver {
        struct device_driver driver;
        int (*probe)(struct tb_service *svc, const struct tb_service_id *id);
        void (*remove)(struct tb_service *svc);
        void (*shutdown)(struct tb_service *svc);
        const struct tb_service_id *id_table;
};

#define TB_SERVICE(key, id)                             \
        .match_flags = TBSVC_MATCH_PROTOCOL_KEY |       \
                       TBSVC_MATCH_PROTOCOL_ID,         \
        .protocol_key = (key),                          \
        .protocol_id = (id)

int tb_register_service_driver(struct tb_service_driver *drv);
void tb_unregister_service_driver(struct tb_service_driver *drv);

static inline void *tb_service_get_drvdata(const struct tb_service *svc)
{
        return dev_get_drvdata(&svc->dev);
}

static inline void tb_service_set_drvdata(struct tb_service *svc, void *data)
{
        dev_set_drvdata(&svc->dev, data);
}

static inline struct tb_xdomain *tb_service_parent(struct tb_service *svc)
{
        return tb_to_xdomain(svc->dev.parent);
}
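
/*
 * A sketch of a service driver skeleton, assuming a made-up protocol
 * key "example", protocol ID 1 and illustrative example_probe() and
 * example_remove() callbacks. &struct tb_service_id lives in
 * linux/mod_devicetable.h:
 *
 *      static const struct tb_service_id example_ids[] = {
 *              { TB_SERVICE("example", 1) },
 *              { },
 *      };
 *      MODULE_DEVICE_TABLE(tbsvc, example_ids);
 *
 *      static struct tb_service_driver example_driver = {
 *              .driver = {
 *                      .name = "example",
 *              },
 *              .probe = example_probe,
 *              .remove = example_remove,
 *              .id_table = example_ids,
 *      };
 *
 * The driver is then registered with tb_register_service_driver() and
 * unregistered with tb_unregister_service_driver(); in the probe
 * callback per-service state can be stored with
 * tb_service_set_drvdata() and the parent XDomain reached via
 * tb_service_parent().
 */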

/**
 * struct tb_nhi - thunderbolt native host interface
 * @lock: Must be held during ring creation/destruction. Is acquired by
 *        interrupt_work when dispatching interrupts to individual rings.
 * @pdev: Pointer to the PCI device
 * @iobase: MMIO space of the NHI
 * @tx_rings: All Tx rings available on this host controller
 * @rx_rings: All Rx rings available on this host controller
 * @msix_ida: Used to allocate MSI-X vectors for rings
 * @going_away: The host controller device is about to disappear so when
 *              this flag is set, avoid touching the hardware anymore.
 * @interrupt_work: Work scheduled to handle ring interrupt when no
 *                  MSI-X is used.
 * @hop_count: Number of rings (end point hops) supported by NHI.
 */
struct tb_nhi {
        spinlock_t lock;
        struct pci_dev *pdev;
        void __iomem *iobase;
        struct tb_ring **tx_rings;
        struct tb_ring **rx_rings;
        struct ida msix_ida;
        bool going_away;
        struct work_struct interrupt_work;
        u32 hop_count;
};

/**
 * struct tb_ring - thunderbolt TX or RX ring associated with an NHI
 * @lock: Lock serializing actions to this ring. Must be acquired after
 *        nhi->lock.
 * @nhi: Pointer to the native host controller interface
 * @size: Size of the ring
 * @hop: Hop (DMA channel) associated with this ring
 * @head: Head of the ring (write next descriptor here)
 * @tail: Tail of the ring (complete next descriptor here)
 * @descriptors: Allocated descriptors for this ring
 * @descriptors_dma: DMA address of the descriptors
 * @queue: Queue holding frames to be transferred over this ring
 * @in_flight: Queue holding frames that are currently in flight
 * @work: Interrupt work structure
 * @is_tx: Is the ring Tx or Rx
 * @running: Is the ring running
 * @irq: MSI-X irq number if the ring uses MSI-X. %0 otherwise.
 * @vector: MSI-X vector number the ring uses (only set if @irq is > 0)
 * @flags: Ring specific flags
 * @sof_mask: Bit mask used to detect start of frame PDF
 * @eof_mask: Bit mask used to detect end of frame PDF
 * @start_poll: Called when ring interrupt is triggered to start
 *              polling. Passing %NULL keeps the ring in interrupt mode.
 * @poll_data: Data passed to @start_poll
 */
struct tb_ring {
        spinlock_t lock;
        struct tb_nhi *nhi;
        int size;
        int hop;
        int head;
        int tail;
        struct ring_desc *descriptors;
        dma_addr_t descriptors_dma;
        struct list_head queue;
        struct list_head in_flight;
        struct work_struct work;
        bool is_tx:1;
        bool running:1;
        int irq;
        u8 vector;
        unsigned int flags;
        u16 sof_mask;
        u16 eof_mask;
        void (*start_poll)(void *data);
        void *poll_data;
};

/* Leave ring interrupt enabled on suspend */
#define RING_FLAG_NO_SUSPEND    BIT(0)
/* Configure the ring to be in frame mode */
#define RING_FLAG_FRAME         BIT(1)
/* Enable end-to-end flow control */
#define RING_FLAG_E2E           BIT(2)

struct ring_frame;
typedef void (*ring_cb)(struct tb_ring *, struct ring_frame *, bool canceled);

/**
 * enum ring_desc_flags - Flags for DMA ring descriptor
 * %RING_DESC_ISOCH: Enable isochronous DMA (Tx only)
 * %RING_DESC_CRC_ERROR: In frame mode CRC check failed for the frame (Rx only)
 * %RING_DESC_COMPLETED: Descriptor completed (set by NHI)
 * %RING_DESC_POSTED: Always set this
 * %RING_DESC_BUFFER_OVERRUN: RX buffer overrun
 * %RING_DESC_INTERRUPT: Request an interrupt on completion
 */
enum ring_desc_flags {
        RING_DESC_ISOCH = 0x1,
        RING_DESC_CRC_ERROR = 0x1,
        RING_DESC_COMPLETED = 0x2,
        RING_DESC_POSTED = 0x4,
        RING_DESC_BUFFER_OVERRUN = 0x4,
        RING_DESC_INTERRUPT = 0x8,
};

/**
 * struct ring_frame - For use with tb_ring_rx()/tb_ring_tx()
 * @buffer_phy: DMA mapped address of the frame
 * @callback: Callback called when the frame is finished (optional)
 * @list: Frame is linked to a queue using this
 * @size: Size of the frame in bytes (%0 means %4096)
 * @flags: Flags for the frame (see &enum ring_desc_flags)
 * @eof: End of frame protocol defined field
 * @sof: Start of frame protocol defined field
 */
struct ring_frame {
        dma_addr_t buffer_phy;
        ring_cb callback;
        struct list_head list;
        u32 size:12;
        u32 flags:12;
        u32 eof:4;
        u32 sof:4;
};

/* Minimum size for tb_ring_rx() buffers */
#define TB_FRAME_SIZE           0x100

struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
                                 unsigned int flags);
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
                                 unsigned int flags, u16 sof_mask, u16 eof_mask,
                                 void (*start_poll)(void *), void *poll_data);
void tb_ring_start(struct tb_ring *ring);
void tb_ring_stop(struct tb_ring *ring);
void tb_ring_free(struct tb_ring *ring);
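
/*
 * A sketch of a typical ring lifetime (hop and size are chosen by the
 * caller; hop 0 is used by the control channel):
 *
 *      struct tb_ring *ring;
 *
 *      ring = tb_ring_alloc_tx(nhi, hop, 16, RING_FLAG_NO_SUSPEND);
 *      if (!ring)
 *              return -ENOMEM;
 *      tb_ring_start(ring);
 *      ... enqueue frames with tb_ring_tx() ...
 *      tb_ring_stop(ring);
 *      tb_ring_free(ring);
 */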

int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame);

/**
 * tb_ring_rx() - enqueue a frame on an RX ring
 * @ring: Ring to enqueue the frame
 * @frame: Frame to enqueue
 *
 * @frame->buffer_phy has to be set. The buffer must contain at least
 * %TB_FRAME_SIZE bytes.
 *
 * @frame->callback will be invoked with @frame->size, @frame->flags,
 * @frame->eof, @frame->sof set once the frame has been received.
 *
 * If tb_ring_stop() is called after the packet has been enqueued
 * @frame->callback will be called with canceled set to true.
 *
 * Return: Returns %-ESHUTDOWN if tb_ring_stop() has been called. Zero
 * otherwise.
 */
static inline int tb_ring_rx(struct tb_ring *ring, struct ring_frame *frame)
{
        WARN_ON(ring->is_tx);
        return __tb_ring_enqueue(ring, frame);
}

/**
 * tb_ring_tx() - enqueue a frame on a TX ring
 * @ring: Ring to enqueue the frame
 * @frame: Frame to enqueue
 *
 * @frame->buffer_phy, @frame->size, @frame->eof and @frame->sof have
 * to be set.
 *
 * @frame->callback will be invoked once the frame has been transmitted.
 *
 * If tb_ring_stop() is called after the packet has been enqueued
 * @frame->callback will be called with canceled set to true.
 *
 * Return: Returns %-ESHUTDOWN if tb_ring_stop() has been called. Zero
 * otherwise.
 */
static inline int tb_ring_tx(struct tb_ring *ring, struct ring_frame *frame)
{
        WARN_ON(!ring->is_tx);
        return __tb_ring_enqueue(ring, frame);
}
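
/*
 * A sketch of transmitting one frame: the buffer behind @buffer_phy
 * must already be DMA mapped (see tb_ring_dma_device() below) and
 * @sof/@eof are set according to the protocol spoken over the ring:
 *
 *      static void example_tx_done(struct tb_ring *ring,
 *                                  struct ring_frame *frame, bool canceled)
 *      {
 *              // unmap the buffer and recycle the frame here
 *      }
 *
 *      frame->buffer_phy = dma_addr;
 *      frame->callback = example_tx_done;
 *      frame->size = len;
 *      frame->sof = sof_pdf;
 *      frame->eof = eof_pdf;
 *      ret = tb_ring_tx(ring, frame);
 */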

/* Used only when the ring is in polling mode */
struct ring_frame *tb_ring_poll(struct tb_ring *ring);
void tb_ring_poll_complete(struct tb_ring *ring);
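
/*
 * A sketch of a polling loop: once @start_poll has been called,
 * completed frames are drained with tb_ring_poll() and the ring
 * interrupt is re-enabled with tb_ring_poll_complete():
 *
 *      struct ring_frame *frame;
 *
 *      while ((frame = tb_ring_poll(ring))) {
 *              // process the completed frame
 *      }
 *      tb_ring_poll_complete(ring);
 */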

/**
 * tb_ring_dma_device() - Return device used for DMA mapping
 * @ring: Ring whose DMA device is retrieved
 *
 * Use this function when you are mapping DMA for buffers that are
 * passed to the ring for sending/receiving.
 */
static inline struct device *tb_ring_dma_device(struct tb_ring *ring)
{
        return &ring->nhi->pdev->dev;
}
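
/*
 * A sketch of preparing a receive buffer: map it for the NHI using the
 * device returned by tb_ring_dma_device() and hand it to the ring (the
 * buffer must be at least TB_FRAME_SIZE bytes; example_rx_done is an
 * illustrative completion callback):
 *
 *      struct device *dma_dev = tb_ring_dma_device(ring);
 *      dma_addr_t dma_addr;
 *
 *      dma_addr = dma_map_single(dma_dev, buf, TB_FRAME_SIZE,
 *                                DMA_FROM_DEVICE);
 *      if (dma_mapping_error(dma_dev, dma_addr))
 *              return -ENOMEM;
 *      frame->buffer_phy = dma_addr;
 *      frame->callback = example_rx_done;
 *      ret = tb_ring_rx(ring, frame);
 */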

#endif /* THUNDERBOLT_H_ */