linux/include/linux/thunderbolt.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Thunderbolt service API
 *
 * Copyright (C) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#ifndef THUNDERBOLT_H_
#define THUNDERBOLT_H_

#include <linux/device.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/mod_devicetable.h>
#include <linux/pci.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

enum tb_cfg_pkg_type {
        TB_CFG_PKG_READ = 1,
        TB_CFG_PKG_WRITE = 2,
        TB_CFG_PKG_ERROR = 3,
        TB_CFG_PKG_NOTIFY_ACK = 4,
        TB_CFG_PKG_EVENT = 5,
        TB_CFG_PKG_XDOMAIN_REQ = 6,
        TB_CFG_PKG_XDOMAIN_RESP = 7,
        TB_CFG_PKG_OVERRIDE = 8,
        TB_CFG_PKG_RESET = 9,
        TB_CFG_PKG_ICM_EVENT = 10,
        TB_CFG_PKG_ICM_CMD = 11,
        TB_CFG_PKG_ICM_RESP = 12,
        TB_CFG_PKG_PREPARE_TO_SLEEP = 13,
};

/**
 * enum tb_security_level - Thunderbolt security level
 * @TB_SECURITY_NONE: No security, legacy mode
 * @TB_SECURITY_USER: User approval required at minimum
 * @TB_SECURITY_SECURE: One time saved key required at minimum
 * @TB_SECURITY_DPONLY: Only tunnel Display port (and USB)
 * @TB_SECURITY_USBONLY: Only tunnel USB controller of the connected
 *                       Thunderbolt dock (and Display Port). All PCIe
 *                       links downstream of the dock are removed.
 * @TB_SECURITY_NOPCIE: For USB4 systems this level is used when the
 *                      PCIe tunneling is disabled from the BIOS.
 */
enum tb_security_level {
        TB_SECURITY_NONE,
        TB_SECURITY_USER,
        TB_SECURITY_SECURE,
        TB_SECURITY_DPONLY,
        TB_SECURITY_USBONLY,
        TB_SECURITY_NOPCIE,
};

/**
 * struct tb - main thunderbolt bus structure
 * @dev: Domain device
 * @lock: Big lock. Must be held when accessing any struct
 *        tb_switch / struct tb_port.
 * @nhi: Pointer to the NHI structure
 * @ctl: Control channel for this domain
 * @wq: Ordered workqueue for all domain specific work
 * @root_switch: Root switch of this domain
 * @cm_ops: Connection manager specific operations vector
 * @index: Linux assigned domain number
 * @security_level: Current security level
 * @nboot_acl: Number of boot ACLs the domain supports
 * @privdata: Private connection manager specific data
 */
struct tb {
        struct device dev;
        struct mutex lock;
        struct tb_nhi *nhi;
        struct tb_ctl *ctl;
        struct workqueue_struct *wq;
        struct tb_switch *root_switch;
        const struct tb_cm_ops *cm_ops;
        int index;
        enum tb_security_level security_level;
        size_t nboot_acl;
        unsigned long privdata[];
};

extern struct bus_type tb_bus_type;
extern struct device_type tb_service_type;
extern struct device_type tb_xdomain_type;

#define TB_LINKS_PER_PHY_PORT   2

/**
 * tb_phy_port_from_link() - Return physical port number for a link
 * @link: Link number (%1 based)
 *
 * Each physical port carries %TB_LINKS_PER_PHY_PORT links, so links %1
 * and %2 map to physical port %0, links %3 and %4 to port %1, and so on.
 */
static inline unsigned int tb_phy_port_from_link(unsigned int link)
{
        return (link - 1) / TB_LINKS_PER_PHY_PORT;
}

/**
 * struct tb_property_dir - XDomain property directory
 * @uuid: Directory UUID or %NULL if root directory
 * @properties: List of properties in this directory
 *
 * User needs to provide serialization if needed.
 */
struct tb_property_dir {
        const uuid_t *uuid;
        struct list_head properties;
};

enum tb_property_type {
        TB_PROPERTY_TYPE_UNKNOWN = 0x00,
        TB_PROPERTY_TYPE_DIRECTORY = 0x44,
        TB_PROPERTY_TYPE_DATA = 0x64,
        TB_PROPERTY_TYPE_TEXT = 0x74,
        TB_PROPERTY_TYPE_VALUE = 0x76,
};

#define TB_PROPERTY_KEY_SIZE    8

/**
 * struct tb_property - XDomain property
 * @list: Used to link properties together in a directory
 * @key: Key for the property (always terminated).
 * @type: Type of the property
 * @length: Length of the property data in dwords
 * @value: Property value
 *
 * Users use @type to determine which field in @value is filled.
 */
struct tb_property {
        struct list_head list;
        char key[TB_PROPERTY_KEY_SIZE + 1];
        enum tb_property_type type;
        size_t length;
        union {
                struct tb_property_dir *dir;
                u8 *data;
                char *text;
                u32 immediate;
        } value;
};

struct tb_property_dir *tb_property_parse_dir(const u32 *block,
                                              size_t block_len);
ssize_t tb_property_format_dir(const struct tb_property_dir *dir, u32 *block,
                               size_t block_len);
struct tb_property_dir *tb_property_copy_dir(const struct tb_property_dir *dir);
struct tb_property_dir *tb_property_create_dir(const uuid_t *uuid);
void tb_property_free_dir(struct tb_property_dir *dir);
int tb_property_add_immediate(struct tb_property_dir *parent, const char *key,
                              u32 value);
int tb_property_add_data(struct tb_property_dir *parent, const char *key,
                         const void *buf, size_t buflen);
int tb_property_add_text(struct tb_property_dir *parent, const char *key,
                         const char *text);
int tb_property_add_dir(struct tb_property_dir *parent, const char *key,
                        struct tb_property_dir *dir);
void tb_property_remove(struct tb_property *tb_property);
struct tb_property *tb_property_find(struct tb_property_dir *dir,
                        const char *key, enum tb_property_type type);
struct tb_property *tb_property_get_next(struct tb_property_dir *dir,
                                         struct tb_property *prev);

#define tb_property_for_each(dir, property)                     \
        for (property = tb_property_get_next(dir, NULL);        \
             property;                                          \
             property = tb_property_get_next(dir, property))

int tb_register_property_dir(const char *key, struct tb_property_dir *dir);
void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
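
/*
 * Usage sketch (not part of the original header): a service driver can
 * publish its own property directory so that remote domains see the
 * service. The directory UUID, the "example" key and the values below
 * are made up for illustration; the "prtcid"/"prtcvers" keys mirror the
 * fields documented for &struct tb_service later in this file.
 *
 *	static const uuid_t example_dir_uuid =
 *		UUID_INIT(0x12345678, 0x1234, 0x1234,
 *			  0x12, 0x34, 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc);
 *	static struct tb_property_dir *example_dir;
 *
 *	static int example_properties_init(void)
 *	{
 *		example_dir = tb_property_create_dir(&example_dir_uuid);
 *		if (!example_dir)
 *			return -ENOMEM;
 *
 *		tb_property_add_immediate(example_dir, "prtcid", 1);
 *		tb_property_add_immediate(example_dir, "prtcvers", 1);
 *		tb_property_add_text(example_dir, "device", "Example service");
 *
 *		return tb_register_property_dir("example", example_dir);
 *	}
 *
 *	static void example_properties_exit(void)
 *	{
 *		tb_unregister_property_dir("example", example_dir);
 *		tb_property_free_dir(example_dir);
 *	}
 */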

/**
 * struct tb_xdomain - Cross-domain (XDomain) connection
 * @dev: XDomain device
 * @tb: Pointer to the domain
 * @remote_uuid: UUID of the remote domain (host)
 * @local_uuid: Cached local UUID
 * @route: Route string with which the other domain can be reached
 * @vendor: Vendor ID of the remote domain
 * @device: Device ID of the remote domain
 * @local_max_hopid: Maximum input HopID of this host
 * @remote_max_hopid: Maximum input HopID of the remote host
 * @lock: Lock to serialize access to the following fields of this structure
 * @vendor_name: Name of the vendor (or %NULL if not known)
 * @device_name: Name of the device (or %NULL if not known)
 * @link_speed: Speed of the link in Gb/s
 * @link_width: Width of the link (1 or 2)
 * @is_unplugged: The XDomain is unplugged
 * @needs_uuid: If the XDomain does not have @remote_uuid it will be
 *              queried first
 * @service_ids: Used to generate IDs for the services
 * @in_hopids: Input HopIDs for DMA tunneling
 * @out_hopids: Output HopIDs for DMA tunneling
 * @local_property_block: Local block of properties
 * @local_property_block_gen: Generation of @local_property_block
 * @local_property_block_len: Length of the @local_property_block in dwords
 * @remote_properties: Properties exported by the remote domain
 * @remote_property_block_gen: Generation of @remote_properties
 * @get_uuid_work: Work used to retrieve @remote_uuid
 * @uuid_retries: Number of times left to request @remote_uuid before
 *                giving up
 * @get_properties_work: Work used to get remote domain properties
 * @properties_retries: Number of times left to read properties
 * @properties_changed_work: Work used to notify the remote domain that
 *                           our properties have changed
 * @properties_changed_retries: Number of times left to send properties
 *                              changed notification
 * @link: Root switch link the remote domain is connected to (ICM only)
 * @depth: Depth in the chain at which the remote domain is connected
 *         (ICM only)
 *
 * This structure represents a connection between two domains (hosts).
 * Each XDomain contains zero or more services which are exposed as
 * &struct tb_service objects.
 *
 * Service drivers may access this structure if they need to enumerate
 * non-standard properties but they need to hold @lock when doing so
 * because properties can be changed asynchronously in response to
 * changes in the remote domain.
 */
struct tb_xdomain {
        struct device dev;
        struct tb *tb;
        uuid_t *remote_uuid;
        const uuid_t *local_uuid;
        u64 route;
        u16 vendor;
        u16 device;
        unsigned int local_max_hopid;
        unsigned int remote_max_hopid;
        struct mutex lock;
        const char *vendor_name;
        const char *device_name;
        unsigned int link_speed;
        unsigned int link_width;
        bool is_unplugged;
        bool needs_uuid;
        struct ida service_ids;
        struct ida in_hopids;
        struct ida out_hopids;
        u32 *local_property_block;
        u32 local_property_block_gen;
        u32 local_property_block_len;
        struct tb_property_dir *remote_properties;
        u32 remote_property_block_gen;
        struct delayed_work get_uuid_work;
        int uuid_retries;
        struct delayed_work get_properties_work;
        int properties_retries;
        struct delayed_work properties_changed_work;
        int properties_changed_retries;
        u8 link;
        u8 depth;
};

int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd);
void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd);
int tb_xdomain_alloc_in_hopid(struct tb_xdomain *xd, int hopid);
void tb_xdomain_release_in_hopid(struct tb_xdomain *xd, int hopid);
int tb_xdomain_alloc_out_hopid(struct tb_xdomain *xd, int hopid);
void tb_xdomain_release_out_hopid(struct tb_xdomain *xd, int hopid);
int tb_xdomain_enable_paths(struct tb_xdomain *xd, int transmit_path,
                            int transmit_ring, int receive_path,
                            int receive_ring);
int tb_xdomain_disable_paths(struct tb_xdomain *xd, int transmit_path,
                             int transmit_ring, int receive_path,
                             int receive_ring);

static inline int tb_xdomain_disable_all_paths(struct tb_xdomain *xd)
{
        return tb_xdomain_disable_paths(xd, -1, -1, -1, -1);
}
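
/*
 * Usage sketch (illustrative, not from the original header): before a
 * DMA tunnel is used, a service driver typically reserves input and
 * output HopIDs and then enables the paths. Passing a negative HopID is
 * assumed here to mean "allocate any available HopID", and which HopID
 * goes into which path/ring argument is protocol specific and agreed
 * with the remote side; the mapping below is only an assumption, and
 * tx_ring/rx_ring are hypothetical ring pointers.
 *
 *	int in_hopid, out_hopid, ret;
 *
 *	in_hopid = tb_xdomain_alloc_in_hopid(xd, -1);
 *	if (in_hopid < 0)
 *		return in_hopid;
 *	out_hopid = tb_xdomain_alloc_out_hopid(xd, -1);
 *	if (out_hopid < 0) {
 *		tb_xdomain_release_in_hopid(xd, in_hopid);
 *		return out_hopid;
 *	}
 *
 *	ret = tb_xdomain_enable_paths(xd, out_hopid, tx_ring->hop,
 *				      in_hopid, rx_ring->hop);
 *
 * On teardown the driver disables the paths (or all of them with
 * tb_xdomain_disable_all_paths()) and releases the HopIDs again.
 */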

struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid);
struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route);

static inline struct tb_xdomain *
tb_xdomain_find_by_uuid_locked(struct tb *tb, const uuid_t *uuid)
{
        struct tb_xdomain *xd;

        mutex_lock(&tb->lock);
        xd = tb_xdomain_find_by_uuid(tb, uuid);
        mutex_unlock(&tb->lock);

        return xd;
}

static inline struct tb_xdomain *
tb_xdomain_find_by_route_locked(struct tb *tb, u64 route)
{
        struct tb_xdomain *xd;

        mutex_lock(&tb->lock);
        xd = tb_xdomain_find_by_route(tb, route);
        mutex_unlock(&tb->lock);

        return xd;
}

static inline struct tb_xdomain *tb_xdomain_get(struct tb_xdomain *xd)
{
        if (xd)
                get_device(&xd->dev);
        return xd;
}

static inline void tb_xdomain_put(struct tb_xdomain *xd)
{
        if (xd)
                put_device(&xd->dev);
}

static inline bool tb_is_xdomain(const struct device *dev)
{
        return dev->type == &tb_xdomain_type;
}

static inline struct tb_xdomain *tb_to_xdomain(struct device *dev)
{
        if (tb_is_xdomain(dev))
                return container_of(dev, struct tb_xdomain, dev);
        return NULL;
}

int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
                        size_t size, enum tb_cfg_pkg_type type);
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
                       size_t request_size, enum tb_cfg_pkg_type request_type,
                       void *response, size_t response_size,
                       enum tb_cfg_pkg_type response_type,
                       unsigned int timeout_msec);
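
/*
 * Usage sketch (illustrative, not from the original header): a service
 * driver can send a protocol specific request and wait for the matching
 * response. struct example_request/example_response, the packet types
 * and the 1000 ms timeout below are assumptions; the real wire format
 * and types are defined by the service protocol.
 *
 *	struct example_request req = { 0 };
 *	struct example_response resp;
 *	int ret;
 *
 *	ret = tb_xdomain_request(xd, &req, sizeof(req),
 *				 TB_CFG_PKG_XDOMAIN_REQ,
 *				 &resp, sizeof(resp),
 *				 TB_CFG_PKG_XDOMAIN_RESP, 1000);
 *	if (ret)
 *		return ret;
 */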

/**
 * struct tb_protocol_handler - Protocol specific handler
 * @uuid: XDomain messages with this UUID are dispatched to this handler
 * @callback: Callback called with the XDomain message. Returning %1
 *            here tells the XDomain core that the message was handled
 *            by this handler and should not be forwarded to other
 *            handlers.
 * @data: Data passed with the callback
 * @list: Handlers are linked using this
 *
 * Thunderbolt services can hook into incoming XDomain requests by
 * registering a protocol handler. The only limitation is that the
 * XDomain discovery protocol UUID cannot be registered since it is
 * handled by the core XDomain code.
 *
 * The @callback must check that the message is really directed to the
 * service the driver implements.
 */
struct tb_protocol_handler {
        const uuid_t *uuid;
        int (*callback)(const void *buf, size_t size, void *data);
        void *data;
        struct list_head list;
};

int tb_register_protocol_handler(struct tb_protocol_handler *handler);
void tb_unregister_protocol_handler(struct tb_protocol_handler *handler);
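
/*
 * Usage sketch (illustrative, not from the original header): a driver
 * that wants to see incoming XDomain packets for its protocol registers
 * a handler keyed by the protocol UUID. example_protocol_uuid,
 * example_handle_packet(), example_packet_is_for_us(),
 * example_process_packet() and struct example_ctx are hypothetical.
 * Returning %1 marks the packet as handled; returning %0 is assumed to
 * let other handlers look at it.
 *
 *	static int example_handle_packet(const void *buf, size_t size,
 *					 void *data)
 *	{
 *		struct example_ctx *ctx = data;
 *
 *		if (!example_packet_is_for_us(ctx, buf, size))
 *			return 0;
 *
 *		example_process_packet(ctx, buf, size);
 *		return 1;
 *	}
 *
 *	static struct tb_protocol_handler example_handler = {
 *		.uuid = &example_protocol_uuid,
 *		.callback = example_handle_packet,
 *		.data = &example_ctx,
 *	};
 *
 *	tb_register_protocol_handler(&example_handler);
 */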

/**
 * struct tb_service - Thunderbolt service
 * @dev: XDomain device
 * @id: ID of the service (shown in sysfs)
 * @key: Protocol key from the properties directory
 * @prtcid: Protocol ID from the properties directory
 * @prtcvers: Protocol version from the properties directory
 * @prtcrevs: Protocol software revision from the properties directory
 * @prtcstns: Protocol settings mask from the properties directory
 * @debugfs_dir: Pointer to the service debugfs directory. Always created
 *               when debugfs is enabled. Can be used by service drivers to
 *               add their own entries under the service.
 *
 * Each domain exposes the set of services it supports as a collection
 * of properties. For each service there will be one corresponding
 * &struct tb_service. Service drivers are bound to these.
 */
struct tb_service {
        struct device dev;
        int id;
        const char *key;
        u32 prtcid;
        u32 prtcvers;
        u32 prtcrevs;
        u32 prtcstns;
        struct dentry *debugfs_dir;
};

static inline struct tb_service *tb_service_get(struct tb_service *svc)
{
        if (svc)
                get_device(&svc->dev);
        return svc;
}

static inline void tb_service_put(struct tb_service *svc)
{
        if (svc)
                put_device(&svc->dev);
}

static inline bool tb_is_service(const struct device *dev)
{
        return dev->type == &tb_service_type;
}

static inline struct tb_service *tb_to_service(struct device *dev)
{
        if (tb_is_service(dev))
                return container_of(dev, struct tb_service, dev);
        return NULL;
}

/**
 * struct tb_service_driver - Thunderbolt service driver
 * @driver: Driver structure
 * @probe: Called when the driver is probed
 * @remove: Called when the driver is removed (optional)
 * @shutdown: Called at shutdown time to stop the service (optional)
 * @id_table: Table of service identifiers the driver supports
 */
struct tb_service_driver {
        struct device_driver driver;
        int (*probe)(struct tb_service *svc, const struct tb_service_id *id);
        void (*remove)(struct tb_service *svc);
        void (*shutdown)(struct tb_service *svc);
        const struct tb_service_id *id_table;
};

#define TB_SERVICE(key, id)                             \
        .match_flags = TBSVC_MATCH_PROTOCOL_KEY |       \
                       TBSVC_MATCH_PROTOCOL_ID,         \
        .protocol_key = (key),                          \
        .protocol_id = (id)

int tb_register_service_driver(struct tb_service_driver *drv);
void tb_unregister_service_driver(struct tb_service_driver *drv);

static inline void *tb_service_get_drvdata(const struct tb_service *svc)
{
        return dev_get_drvdata(&svc->dev);
}

static inline void tb_service_set_drvdata(struct tb_service *svc, void *data)
{
        dev_set_drvdata(&svc->dev, data);
}

static inline struct tb_xdomain *tb_service_parent(struct tb_service *svc)
{
        return tb_to_xdomain(svc->dev.parent);
}
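
/*
 * Usage sketch (illustrative, not from the original header): a service
 * driver matches services by protocol key and ID using TB_SERVICE() and
 * registers itself on the Thunderbolt bus. The "example" key, protocol
 * ID %1, example_probe()/example_remove() and struct example_ctx are
 * all hypothetical.
 *
 *	static const struct tb_service_id example_ids[] = {
 *		{ TB_SERVICE("example", 1) },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(tbsvc, example_ids);
 *
 *	static int example_probe(struct tb_service *svc,
 *				 const struct tb_service_id *id)
 *	{
 *		struct example_ctx *ctx;
 *
 *		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 *		if (!ctx)
 *			return -ENOMEM;
 *
 *		ctx->xd = tb_xdomain_get(tb_service_parent(svc));
 *		tb_service_set_drvdata(svc, ctx);
 *		return 0;
 *	}
 *
 *	static struct tb_service_driver example_driver = {
 *		.driver = {
 *			.owner = THIS_MODULE,
 *			.name = "example",
 *		},
 *		.probe = example_probe,
 *		.remove = example_remove,
 *		.id_table = example_ids,
 *	};
 *
 *	tb_register_service_driver(&example_driver);
 */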

/**
 * struct tb_nhi - thunderbolt native host interface
 * @lock: Must be held during ring creation/destruction. Is acquired by
 *        interrupt_work when dispatching interrupts to individual rings.
 * @pdev: Pointer to the PCI device
 * @ops: NHI specific optional ops
 * @iobase: MMIO space of the NHI
 * @tx_rings: All Tx rings available on this host controller
 * @rx_rings: All Rx rings available on this host controller
 * @msix_ida: Used to allocate MSI-X vectors for rings
 * @going_away: The host controller device is about to disappear so when
 *              this flag is set, avoid touching the hardware anymore.
 * @interrupt_work: Work scheduled to handle ring interrupt when no
 *                  MSI-X is used.
 * @hop_count: Number of rings (end point hops) supported by NHI.
 */
struct tb_nhi {
        spinlock_t lock;
        struct pci_dev *pdev;
        const struct tb_nhi_ops *ops;
        void __iomem *iobase;
        struct tb_ring **tx_rings;
        struct tb_ring **rx_rings;
        struct ida msix_ida;
        bool going_away;
        struct work_struct interrupt_work;
        u32 hop_count;
};

/**
 * struct tb_ring - thunderbolt TX or RX ring associated with an NHI
 * @lock: Lock serializing actions to this ring. Must be acquired after
 *        nhi->lock.
 * @nhi: Pointer to the native host controller interface
 * @size: Size of the ring
 * @hop: Hop (DMA channel) associated with this ring
 * @head: Head of the ring (write next descriptor here)
 * @tail: Tail of the ring (complete next descriptor here)
 * @descriptors: Allocated descriptors for this ring
 * @descriptors_dma: DMA address of @descriptors
 * @queue: Queue holding frames to be transferred over this ring
 * @in_flight: Queue holding frames that are currently in flight
 * @work: Interrupt work structure
 * @is_tx: Is the ring Tx or Rx
 * @running: Is the ring running
 * @irq: MSI-X irq number if the ring uses MSI-X. %0 otherwise.
 * @vector: MSI-X vector number the ring uses (only set if @irq is > 0)
 * @flags: Ring specific flags
 * @e2e_tx_hop: Transmit HopID when E2E is enabled. Only applicable to
 *              RX ring. For TX ring this should be set to %0.
 * @sof_mask: Bit mask used to detect start of frame PDF
 * @eof_mask: Bit mask used to detect end of frame PDF
 * @start_poll: Called when ring interrupt is triggered to start
 *              polling. Passing %NULL keeps the ring in interrupt mode.
 * @poll_data: Data passed to @start_poll
 */
struct tb_ring {
        spinlock_t lock;
        struct tb_nhi *nhi;
        int size;
        int hop;
        int head;
        int tail;
        struct ring_desc *descriptors;
        dma_addr_t descriptors_dma;
        struct list_head queue;
        struct list_head in_flight;
        struct work_struct work;
        bool is_tx:1;
        bool running:1;
        int irq;
        u8 vector;
        unsigned int flags;
        int e2e_tx_hop;
        u16 sof_mask;
        u16 eof_mask;
        void (*start_poll)(void *data);
        void *poll_data;
};

/* Leave ring interrupt enabled on suspend */
#define RING_FLAG_NO_SUSPEND    BIT(0)
/* Configure the ring to be in frame mode */
#define RING_FLAG_FRAME         BIT(1)
/* Enable end-to-end flow control */
#define RING_FLAG_E2E           BIT(2)

struct ring_frame;
typedef void (*ring_cb)(struct tb_ring *, struct ring_frame *, bool canceled);

/**
 * enum ring_desc_flags - Flags for DMA ring descriptor
 * @RING_DESC_ISOCH: Enable isochronous DMA (Tx only)
 * @RING_DESC_CRC_ERROR: In frame mode CRC check failed for the frame (Rx only)
 * @RING_DESC_COMPLETED: Descriptor completed (set by NHI)
 * @RING_DESC_POSTED: Always set this
 * @RING_DESC_BUFFER_OVERRUN: RX buffer overrun
 * @RING_DESC_INTERRUPT: Request an interrupt on completion
 */
enum ring_desc_flags {
        RING_DESC_ISOCH = 0x1,
        RING_DESC_CRC_ERROR = 0x1,
        RING_DESC_COMPLETED = 0x2,
        RING_DESC_POSTED = 0x4,
        RING_DESC_BUFFER_OVERRUN = 0x4,
        RING_DESC_INTERRUPT = 0x8,
};

/**
 * struct ring_frame - For use with tb_ring_rx()/tb_ring_tx()
 * @buffer_phy: DMA mapped address of the frame
 * @callback: Callback called when the frame is finished (optional)
 * @list: Frame is linked to a queue using this
 * @size: Size of the frame in bytes (%0 means %4096)
 * @flags: Flags for the frame (see &enum ring_desc_flags)
 * @eof: End of frame protocol defined field
 * @sof: Start of frame protocol defined field
 */
struct ring_frame {
        dma_addr_t buffer_phy;
        ring_cb callback;
        struct list_head list;
        u32 size:12;
        u32 flags:12;
        u32 eof:4;
        u32 sof:4;
};

/* Minimum size for tb_ring_rx() buffers */
#define TB_FRAME_SIZE           0x100

struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
                                 unsigned int flags);
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
                                 unsigned int flags, int e2e_tx_hop,
                                 u16 sof_mask, u16 eof_mask,
                                 void (*start_poll)(void *), void *poll_data);
void tb_ring_start(struct tb_ring *ring);
void tb_ring_stop(struct tb_ring *ring);
void tb_ring_free(struct tb_ring *ring);

int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame);

/**
 * tb_ring_rx() - enqueue a frame on an RX ring
 * @ring: Ring to enqueue the frame
 * @frame: Frame to enqueue
 *
 * @frame->buffer_phy has to be set. The buffer must contain at least
 * %TB_FRAME_SIZE bytes.
 *
 * @frame->callback will be invoked with @frame->size, @frame->flags,
 * @frame->eof, @frame->sof set once the frame has been received.
 *
 * If tb_ring_stop() is called after the packet has been enqueued
 * @frame->callback will be called with canceled set to true.
 *
 * Return: Returns %-ESHUTDOWN if tb_ring_stop() has been called. Zero otherwise.
 */
static inline int tb_ring_rx(struct tb_ring *ring, struct ring_frame *frame)
{
        WARN_ON(ring->is_tx);
        return __tb_ring_enqueue(ring, frame);
}

/**
 * tb_ring_tx() - enqueue a frame on a TX ring
 * @ring: Ring to enqueue the frame
 * @frame: Frame to enqueue
 *
 * @frame->buffer_phy, @frame->size, @frame->eof and @frame->sof have
 * to be set.
 *
 * @frame->callback will be invoked once the frame has been transmitted.
 *
 * If tb_ring_stop() is called after the packet has been enqueued
 * @frame->callback will be called with canceled set to true.
 *
 * Return: Returns %-ESHUTDOWN if tb_ring_stop() has been called. Zero otherwise.
 */
static inline int tb_ring_tx(struct tb_ring *ring, struct ring_frame *frame)
{
        WARN_ON(!ring->is_tx);
        return __tb_ring_enqueue(ring, frame);
}

/* Used only when the ring is in polling mode */
struct ring_frame *tb_ring_poll(struct tb_ring *ring);
void tb_ring_poll_complete(struct tb_ring *ring);

/**
 * tb_ring_dma_device() - Return device used for DMA mapping
 * @ring: Ring whose DMA device is retrieved
 *
 * Use this function when you are mapping DMA for buffers that are
 * passed to the ring for sending/receiving.
 */
static inline struct device *tb_ring_dma_device(struct tb_ring *ring)
{
        return &ring->nhi->pdev->dev;
}
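
/*
 * Usage sketch (illustrative, not from the original header): raw DMA
 * rings are allocated from the NHI, buffers are mapped against the
 * device returned by tb_ring_dma_device(), and frames are posted with
 * tb_ring_rx()/tb_ring_tx(). The hop number (-1), ring size, SOF/EOF
 * masks and example_rx_callback() below are made up for the sketch.
 *
 *	struct ring_frame *frame;
 *	struct tb_ring *ring;
 *	void *buf;
 *	int ret;
 *
 *	ring = tb_ring_alloc_rx(nhi, -1, 256, RING_FLAG_FRAME, 0,
 *				0xffff, 0xffff, NULL, NULL);
 *	if (!ring)
 *		return -ENOMEM;
 *	tb_ring_start(ring);
 *
 *	frame = kzalloc(sizeof(*frame), GFP_KERNEL);
 *	buf = kmalloc(TB_FRAME_SIZE, GFP_KERNEL);
 *	frame->buffer_phy = dma_map_single(tb_ring_dma_device(ring), buf,
 *					   TB_FRAME_SIZE, DMA_FROM_DEVICE);
 *	frame->callback = example_rx_callback;
 *	ret = tb_ring_rx(ring, frame);
 *
 * When done, the ring is stopped and freed with tb_ring_stop() and
 * tb_ring_free(), and the buffers are unmapped and released again.
 */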

#endif /* THUNDERBOLT_H_ */