1
2
3
4
5
6
7
8
9
10
11#ifndef THUNDERBOLT_H_
12#define THUNDERBOLT_H_
13
14#include <linux/device.h>
15#include <linux/idr.h>
16#include <linux/list.h>
17#include <linux/mutex.h>
18#include <linux/mod_devicetable.h>
19#include <linux/pci.h>
20#include <linux/uuid.h>
21#include <linux/workqueue.h>
22
/*
 * enum tb_cfg_pkg_type - Thunderbolt control channel packet types
 *
 * The values are raw packet-type codes, so they must not be renumbered.
 * NOTE(review): per-type semantics (read/write config, XDomain
 * request/response, ICM mailbox traffic) are inferred from the names —
 * verify against the control channel implementation.
 */
enum tb_cfg_pkg_type {
	TB_CFG_PKG_READ = 1,
	TB_CFG_PKG_WRITE = 2,
	TB_CFG_PKG_ERROR = 3,
	TB_CFG_PKG_NOTIFY_ACK = 4,
	TB_CFG_PKG_EVENT = 5,
	TB_CFG_PKG_XDOMAIN_REQ = 6,
	TB_CFG_PKG_XDOMAIN_RESP = 7,
	TB_CFG_PKG_OVERRIDE = 8,
	TB_CFG_PKG_RESET = 9,
	TB_CFG_PKG_ICM_EVENT = 10,
	TB_CFG_PKG_ICM_CMD = 11,
	TB_CFG_PKG_ICM_RESP = 12,
	TB_CFG_PKG_PREPARE_TO_SLEEP = 13,
};
38
39
40
41
42
43
44
45
46
47
48
/**
 * enum tb_security_level - Thunderbolt security level
 * @TB_SECURITY_NONE: No security, devices are connected automatically
 * @TB_SECURITY_USER: User approval required at minimum
 * @TB_SECURITY_SECURE: One-time saved key required at minimum
 * @TB_SECURITY_DPONLY: Only tunnel DisplayPort
 * @TB_SECURITY_USBONLY: Only tunnel USB
 *
 * NOTE(review): descriptions are inferred from the names — confirm
 * against the connection manager security handling.
 */
enum tb_security_level {
	TB_SECURITY_NONE,
	TB_SECURITY_USER,
	TB_SECURITY_SECURE,
	TB_SECURITY_DPONLY,
	TB_SECURITY_USBONLY,
};
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72struct tb {
73 struct device dev;
74 struct mutex lock;
75 struct tb_nhi *nhi;
76 struct tb_ctl *ctl;
77 struct workqueue_struct *wq;
78 struct tb_switch *root_switch;
79 const struct tb_cm_ops *cm_ops;
80 int index;
81 enum tb_security_level security_level;
82 size_t nboot_acl;
83 unsigned long privdata[0];
84};
85
/* Bus and device types registered by the Thunderbolt core. */
extern struct bus_type tb_bus_type;
extern struct device_type tb_service_type;
extern struct device_type tb_xdomain_type;
89
/* Each physical port carries two Thunderbolt links. */
#define TB_LINKS_PER_PHY_PORT	2

/*
 * tb_phy_port_from_link() - map a 1-based link number to its 0-based
 * physical port number. Links 1 and 2 belong to port 0, links 3 and 4
 * to port 1, and so on.
 */
static inline unsigned int tb_phy_port_from_link(unsigned int link)
{
	unsigned int link_index = link - 1;

	return link_index / TB_LINKS_PER_PHY_PORT;
}
96
97
98
99
100
101
102
103
/**
 * struct tb_property_dir - XDomain property directory
 * @uuid: UUID identifying the directory (presumably %NULL for the
 *	  root directory — verify in the property code)
 * @properties: List of &struct tb_property entries in this directory
 */
struct tb_property_dir {
	const uuid_t *uuid;
	struct list_head properties;
};
108
/*
 * enum tb_property_type - Type of an XDomain property
 *
 * The non-zero values are the ASCII codes of the type letters used by
 * the on-wire property block format ('D', 'd', 't', 'v').
 */
enum tb_property_type {
	TB_PROPERTY_TYPE_UNKNOWN = 0x00,
	TB_PROPERTY_TYPE_DIRECTORY = 0x44,	/* 'D' */
	TB_PROPERTY_TYPE_DATA = 0x64,		/* 'd' */
	TB_PROPERTY_TYPE_TEXT = 0x74,		/* 't' */
	TB_PROPERTY_TYPE_VALUE = 0x76,		/* 'v' */
};

/* Maximum length of a property key, excluding the NUL terminator. */
#define TB_PROPERTY_KEY_SIZE 8
118
119
120
121
122
123
124
125
126
127
128
/**
 * struct tb_property - XDomain property
 * @list: Links the property into its parent directory's list
 * @key: Key of the property (always NUL-terminated; at most
 *	 %TB_PROPERTY_KEY_SIZE characters)
 * @type: Type of the property, selects the active @value member
 * @length: Length of the property payload (NOTE(review): unit —
 *	    bytes vs. dwords — not visible here; verify)
 * @value: Property value; @type determines which member is valid
 */
struct tb_property {
	struct list_head list;
	char key[TB_PROPERTY_KEY_SIZE + 1];
	enum tb_property_type type;
	size_t length;
	union {
		struct tb_property_dir *dir;	/* TB_PROPERTY_TYPE_DIRECTORY */
		u8 *data;			/* TB_PROPERTY_TYPE_DATA */
		char *text;			/* TB_PROPERTY_TYPE_TEXT */
		u32 immediate;			/* TB_PROPERTY_TYPE_VALUE */
	} value;
};
141
/*
 * Parsing/formatting of XDomain property blocks and manipulation of
 * property directories. The tb_property_add_*() helpers return 0 on
 * success / negative errno (presumably — verify against callers).
 */
struct tb_property_dir *tb_property_parse_dir(const u32 *block,
					      size_t block_len);
ssize_t tb_property_format_dir(const struct tb_property_dir *dir, u32 *block,
			       size_t block_len);
struct tb_property_dir *tb_property_create_dir(const uuid_t *uuid);
void tb_property_free_dir(struct tb_property_dir *dir);
int tb_property_add_immediate(struct tb_property_dir *parent, const char *key,
			      u32 value);
int tb_property_add_data(struct tb_property_dir *parent, const char *key,
			 const void *buf, size_t buflen);
int tb_property_add_text(struct tb_property_dir *parent, const char *key,
			 const char *text);
int tb_property_add_dir(struct tb_property_dir *parent, const char *key,
			struct tb_property_dir *dir);
void tb_property_remove(struct tb_property *tb_property);
struct tb_property *tb_property_find(struct tb_property_dir *dir,
				     const char *key, enum tb_property_type type);
struct tb_property *tb_property_get_next(struct tb_property_dir *dir,
					 struct tb_property *prev);

/**
 * tb_property_for_each - iterate over properties in a directory
 * @dir: Directory to iterate
 * @property: Iteration cursor (&struct tb_property *); loop ends when
 *	      it becomes %NULL
 */
#define tb_property_for_each(dir, property)			\
	for (property = tb_property_get_next(dir, NULL);	\
	     property;						\
	     property = tb_property_get_next(dir, property))

/* Register/unregister a property directory under @key for the local host. */
int tb_register_property_dir(const char *key, struct tb_property_dir *dir);
void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
/**
 * struct tb_xdomain - Cross-domain (XDomain) connection
 * @dev: XDomain device
 * @tb: Pointer to the domain the connection belongs to
 * @remote_uuid: UUID of the remote host (see @needs_uuid)
 * @local_uuid: UUID of the local host
 * @route: Route string to the remote host
 * @vendor: Vendor ID of the remote host
 * @device: Device ID of the remote host
 * @lock: Mutex (presumably protecting the name/property fields — verify)
 * @vendor_name: Name of the remote vendor
 * @device_name: Name of the remote device
 * @is_unplugged: The remote host has been unplugged
 * @resume: The connection is being resumed
 * @needs_uuid: The remote UUID still needs to be queried
 * @transmit_path: Path (hop) used for transmitting packets
 * @transmit_ring: NHI ring used for transmitting packets
 * @receive_path: Path (hop) used for receiving packets
 * @receive_ring: NHI ring used for receiving packets
 * @service_ids: IDA allocating IDs for services on this connection
 *		 (see &struct tb_service @id)
 * @properties: Properties exported by the remote host
 * @property_block_gen: Generation counter for @properties
 * @get_uuid_work: Delayed work querying the remote UUID
 * @uuid_retries: Remaining retries for the UUID query
 * @get_properties_work: Delayed work querying remote properties
 * @properties_retries: Remaining retries for the property query
 * @properties_changed_work: Delayed work for property-change notification
 * @properties_changed_retries: Remaining retries for the notification
 * @link: Root switch link the remote host is connected to
 * @depth: Depth in the chain of the remote host
 *
 * NOTE(review): descriptions beyond what this header's helpers show are
 * inferred from field names and the associated work items — verify
 * against the XDomain implementation.
 */
struct tb_xdomain {
	struct device dev;
	struct tb *tb;
	uuid_t *remote_uuid;
	const uuid_t *local_uuid;
	u64 route;
	u16 vendor;
	u16 device;
	struct mutex lock;
	const char *vendor_name;
	const char *device_name;
	bool is_unplugged;
	bool resume;
	bool needs_uuid;
	u16 transmit_path;
	u16 transmit_ring;
	u16 receive_path;
	u16 receive_ring;
	struct ida service_ids;
	struct tb_property_dir *properties;
	u32 property_block_gen;
	struct delayed_work get_uuid_work;
	int uuid_retries;
	struct delayed_work get_properties_work;
	int properties_retries;
	struct delayed_work properties_changed_work;
	int properties_changed_retries;
	u8 link;
	u8 depth;
};
245
/*
 * Enable/disable the DMA paths used by an XDomain connection, and look
 * up an existing connection by remote UUID or route string. The find
 * functions presumably require @tb->lock to be held — the *_locked
 * wrappers below suggest so; verify at the definitions.
 */
int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path,
			    u16 transmit_ring, u16 receive_path,
			    u16 receive_ring);
int tb_xdomain_disable_paths(struct tb_xdomain *xd);
struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid);
struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route);
252
253static inline struct tb_xdomain *
254tb_xdomain_find_by_uuid_locked(struct tb *tb, const uuid_t *uuid)
255{
256 struct tb_xdomain *xd;
257
258 mutex_lock(&tb->lock);
259 xd = tb_xdomain_find_by_uuid(tb, uuid);
260 mutex_unlock(&tb->lock);
261
262 return xd;
263}
264
265static inline struct tb_xdomain *
266tb_xdomain_find_by_route_locked(struct tb *tb, u64 route)
267{
268 struct tb_xdomain *xd;
269
270 mutex_lock(&tb->lock);
271 xd = tb_xdomain_find_by_route(tb, route);
272 mutex_unlock(&tb->lock);
273
274 return xd;
275}
276
277static inline struct tb_xdomain *tb_xdomain_get(struct tb_xdomain *xd)
278{
279 if (xd)
280 get_device(&xd->dev);
281 return xd;
282}
283
284static inline void tb_xdomain_put(struct tb_xdomain *xd)
285{
286 if (xd)
287 put_device(&xd->dev);
288}
289
290static inline bool tb_is_xdomain(const struct device *dev)
291{
292 return dev->type == &tb_xdomain_type;
293}
294
295static inline struct tb_xdomain *tb_to_xdomain(struct device *dev)
296{
297 if (tb_is_xdomain(dev))
298 return container_of(dev, struct tb_xdomain, dev);
299 return NULL;
300}
301
/*
 * Send an XDomain response or request over the connection.
 * tb_xdomain_request() waits up to @timeout_msec for a response of
 * @response_type, so it presumably must not be called from atomic
 * context — NOTE(review): inferred from the timeout parameter; verify.
 */
int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
			size_t size, enum tb_cfg_pkg_type type);
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
		       size_t request_size, enum tb_cfg_pkg_type request_type,
		       void *response, size_t response_size,
		       enum tb_cfg_pkg_type response_type,
		       unsigned int timeout_msec);
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
/**
 * struct tb_protocol_handler - XDomain protocol specific handler
 * @uuid: XDomain messages with this protocol UUID are routed to @callback
 * @callback: Called for each matching message together with @data;
 *	      NOTE(review): return-value semantics are defined by the
 *	      dispatcher — verify there
 * @data: Opaque pointer passed to @callback
 * @list: Links the handler on the internal handler list
 */
struct tb_protocol_handler {
	const uuid_t *uuid;
	int (*callback)(const void *buf, size_t size, void *data);
	void *data;
	struct list_head list;
};

/* Register/unregister a protocol handler for incoming XDomain messages. */
int tb_register_protocol_handler(struct tb_protocol_handler *handler);
void tb_unregister_protocol_handler(struct tb_protocol_handler *handler);
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
/**
 * struct tb_service - Thunderbolt service exposed over an XDomain link
 * @dev: Service device
 * @id: ID of the service (allocated from the parent XDomain's
 *	@service_ids IDA)
 * @key: Protocol key of the service
 * @prtcid: Protocol ID of the service
 * @prtcvers: Protocol version of the service
 * @prtcrevs: Protocol software revision of the service
 * @prtcstns: Protocol settings bitmask of the service
 *
 * NOTE(review): prtc* field meanings follow the XDomain service
 * property naming — verify against the property parsing code.
 */
struct tb_service {
	struct device dev;
	int id;
	const char *key;
	u32 prtcid;
	u32 prtcvers;
	u32 prtcrevs;
	u32 prtcstns;
};
361
362static inline struct tb_service *tb_service_get(struct tb_service *svc)
363{
364 if (svc)
365 get_device(&svc->dev);
366 return svc;
367}
368
369static inline void tb_service_put(struct tb_service *svc)
370{
371 if (svc)
372 put_device(&svc->dev);
373}
374
375static inline bool tb_is_service(const struct device *dev)
376{
377 return dev->type == &tb_service_type;
378}
379
380static inline struct tb_service *tb_to_service(struct device *dev)
381{
382 if (tb_is_service(dev))
383 return container_of(dev, struct tb_service, dev);
384 return NULL;
385}
386
387
388
389
390
391
392
393
394
/**
 * struct tb_service_driver - Thunderbolt service driver
 * @driver: Driver structure
 * @probe: Called when the driver is bound to a matching service
 * @remove: Called when the service is removed or the driver unbound
 * @shutdown: Called at shutdown time to stop the service
 * @id_table: Table of service identifiers the driver supports
 */
struct tb_service_driver {
	struct device_driver driver;
	int (*probe)(struct tb_service *svc, const struct tb_service_id *id);
	void (*remove)(struct tb_service *svc);
	void (*shutdown)(struct tb_service *svc);
	const struct tb_service_id *id_table;
};
402
/**
 * TB_SERVICE() - designated initializer entries for &struct tb_service_id
 * @key: Protocol key to match
 * @id: Protocol id to match
 *
 * Convenience macro for building service ID table entries that match
 * on protocol key and protocol id.
 */
#define TB_SERVICE(key, id)			\
	.match_flags = TBSVC_MATCH_PROTOCOL_KEY | \
		       TBSVC_MATCH_PROTOCOL_ID, \
	.protocol_key = (key),			\
	.protocol_id = (id)

/* Register/unregister a driver for Thunderbolt services. */
int tb_register_service_driver(struct tb_service_driver *drv);
void tb_unregister_service_driver(struct tb_service_driver *drv);
411
412static inline void *tb_service_get_drvdata(const struct tb_service *svc)
413{
414 return dev_get_drvdata(&svc->dev);
415}
416
417static inline void tb_service_set_drvdata(struct tb_service *svc, void *data)
418{
419 dev_set_drvdata(&svc->dev, data);
420}
421
422static inline struct tb_xdomain *tb_service_parent(struct tb_service *svc)
423{
424 return tb_to_xdomain(svc->dev.parent);
425}
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
/**
 * struct tb_nhi - Thunderbolt native host interface
 * @lock: Spinlock (presumably protecting the ring arrays — verify)
 * @pdev: Underlying PCI device of the NHI
 * @iobase: Mapped MMIO register space of the NHI
 * @tx_rings: Array of @hop_count TX ring pointers
 * @rx_rings: Array of @hop_count RX ring pointers
 * @msix_ida: Allocator for MSI-X vectors used by the rings
 * @going_away: The host controller is about to disappear
 * @interrupt_work: Work for handling ring interrupts
 * @hop_count: Number of rings (hops) supported by the NHI
 */
struct tb_nhi {
	spinlock_t lock;
	struct pci_dev *pdev;
	void __iomem *iobase;
	struct tb_ring **tx_rings;
	struct tb_ring **rx_rings;
	struct ida msix_ida;
	bool going_away;
	struct work_struct interrupt_work;
	u32 hop_count;
};
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
/**
 * struct tb_ring - NHI TX or RX ring
 * @lock: Spinlock protecting the ring state
 * @nhi: NHI the ring belongs to
 * @size: Number of descriptors in the ring
 * @hop: Hop (ring index) this ring is associated with
 * @head: Producer index of the descriptor array
 * @tail: Consumer index of the descriptor array
 * @descriptors: Hardware descriptor array
 * @descriptors_dma: DMA address of @descriptors
 * @queue: Frames queued, waiting for a free descriptor
 * @in_flight: Frames currently handed to hardware
 * @work: Completion processing work
 * @is_tx: True for TX rings, false for RX rings
 * @running: The ring has been started
 * @irq: Interrupt number the ring uses
 * @vector: MSI-X vector number the ring uses
 * @flags: RING_FLAG_* set for this ring
 * @sof_mask: Start-of-frame PDF bitmask (see tb_ring_alloc_rx())
 * @eof_mask: End-of-frame PDF bitmask (see tb_ring_alloc_rx())
 * @start_poll: Optional polling-mode callback (see tb_ring_alloc_rx())
 * @poll_data: Data passed to @start_poll
 *
 * NOTE(review): head/tail producer-consumer roles and the sof/eof
 * semantics are inferred — confirm against the NHI driver.
 */
struct tb_ring {
	spinlock_t lock;
	struct tb_nhi *nhi;
	int size;
	int hop;
	int head;
	int tail;
	struct ring_desc *descriptors;
	dma_addr_t descriptors_dma;
	struct list_head queue;
	struct list_head in_flight;
	struct work_struct work;
	bool is_tx:1;
	bool running:1;
	int irq;
	u8 vector;
	unsigned int flags;
	u16 sof_mask;
	u16 eof_mask;
	void (*start_poll)(void *data);
	void *poll_data;
};
500
501
/* Flags accepted by tb_ring_alloc_tx()/tb_ring_alloc_rx() */
/* Keep the ring active across suspend (presumably — verify in NHI code) */
#define RING_FLAG_NO_SUSPEND BIT(0)
/* Configure the ring for frame (as opposed to raw) mode */
#define RING_FLAG_FRAME BIT(1)
/* Enable end-to-end flow control on the ring */
#define RING_FLAG_E2E BIT(2)

struct ring_frame;
/*
 * Completion callback invoked for a finished frame. @canceled is true
 * when the frame was flushed without being processed by hardware
 * (NOTE(review): inferred from the name — verify at the invocation site).
 */
typedef void (*ring_cb)(struct tb_ring *, struct ring_frame *, bool canceled);
510
511
512
513
514
515
516
517
518
519
/*
 * enum ring_desc_flags - Hardware descriptor flags
 *
 * Note the deliberate bit overlaps: 0x1 and 0x4 each carry a different
 * meaning depending on ring direction (presumably TX vs. RX — verify
 * against the NHI hardware documentation). Not a bug; do not "fix".
 */
enum ring_desc_flags {
	RING_DESC_ISOCH = 0x1,			/* shares bit with CRC_ERROR */
	RING_DESC_CRC_ERROR = 0x1,		/* RX: frame failed CRC check */
	RING_DESC_COMPLETED = 0x2,		/* descriptor processed by HW */
	RING_DESC_POSTED = 0x4,			/* shares bit with BUFFER_OVERRUN */
	RING_DESC_BUFFER_OVERRUN = 0x04,	/* RX: buffer too small for frame */
	RING_DESC_INTERRUPT = 0x8,		/* request interrupt on completion */
};
528
529
530
531
532
533
534
535
536
537
538
/**
 * struct ring_frame - frame for use with tb_ring_rx()/tb_ring_tx()
 * @buffer_phy: DMA mapped address of the frame buffer
 * @callback: Optional callback invoked when the frame is finished
 * @list: Links the frame on the ring's queue/in_flight lists
 * @size: Size of the frame in bytes (12-bit field)
 * @flags: &enum ring_desc_flags for the frame (12-bit field)
 * @eof: End-of-frame PDF, protocol defined (4-bit field)
 * @sof: Start-of-frame PDF, protocol defined (4-bit field)
 */
struct ring_frame {
	dma_addr_t buffer_phy;
	ring_cb callback;
	struct list_head list;
	u32 size:12;
	u32 flags:12;
	u32 eof:4;
	u32 sof:4;
};
548
549
/* 256-byte frame size (NOTE(review): exact usage — minimum RX buffer
 * size vs. control frame size — verify against the users). */
#define TB_FRAME_SIZE 0x100

/*
 * Allocate a TX or RX ring on hop @hop with @size descriptors. For RX
 * rings, @sof_mask/@eof_mask select the accepted PDF values and a
 * non-%NULL @start_poll switches the ring to polling mode (see
 * tb_ring_poll()). Returned rings are started/stopped/freed with the
 * functions below.
 */
struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags);
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags, u16 sof_mask, u16 eof_mask,
				 void (*start_poll)(void *), void *poll_data);
void tb_ring_start(struct tb_ring *ring);
void tb_ring_stop(struct tb_ring *ring);
void tb_ring_free(struct tb_ring *ring);

/* Internal helper; use tb_ring_rx()/tb_ring_tx() instead. */
int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame);
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
/**
 * tb_ring_rx() - enqueue a frame on an RX ring
 * @ring: Ring to enqueue the frame on (must be an RX ring)
 * @frame: Frame to enqueue
 *
 * Warns — but still proceeds — if called on a TX ring. The return
 * value is whatever __tb_ring_enqueue() returns.
 */
static inline int tb_ring_rx(struct tb_ring *ring, struct ring_frame *frame)
{
	WARN_ON(ring->is_tx);
	return __tb_ring_enqueue(ring, frame);
}
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
/**
 * tb_ring_tx() - enqueue a frame on a TX ring
 * @ring: Ring to enqueue the frame on (must be a TX ring)
 * @frame: Frame to enqueue
 *
 * Warns — but still proceeds — if called on an RX ring. The return
 * value is whatever __tb_ring_enqueue() returns.
 */
static inline int tb_ring_tx(struct tb_ring *ring, struct ring_frame *frame)
{
	WARN_ON(!ring->is_tx);
	return __tb_ring_enqueue(ring, frame);
}
605
606
/* Polling-mode frame harvesting; see the @start_poll parameter of
 * tb_ring_alloc_rx(). */
struct ring_frame *tb_ring_poll(struct tb_ring *ring);
void tb_ring_poll_complete(struct tb_ring *ring);
609
610
611
612
613
614
615
616
617static inline struct device *tb_ring_dma_device(struct tb_ring *ring)
618{
619 return &ring->nhi->pdev->dev;
620}
621
622#endif
623