1
2
3
4
5
6
7
8
9
10
11
12
13
14#ifndef THUNDERBOLT_H_
15#define THUNDERBOLT_H_
16
17#include <linux/device.h>
18#include <linux/idr.h>
19#include <linux/list.h>
20#include <linux/mutex.h>
21#include <linux/mod_devicetable.h>
22#include <linux/pci.h>
23#include <linux/uuid.h>
24#include <linux/workqueue.h>
25
/*
 * enum tb_cfg_pkg_type - packet types used on the Thunderbolt control
 * channel. tb_xdomain_request()/tb_xdomain_response() below take these
 * as the request/response packet type. The ICM_* entries are presumably
 * firmware connection manager messages — confirm against the ICM docs.
 */
enum tb_cfg_pkg_type {
	TB_CFG_PKG_READ = 1,
	TB_CFG_PKG_WRITE = 2,
	TB_CFG_PKG_ERROR = 3,
	TB_CFG_PKG_NOTIFY_ACK = 4,
	TB_CFG_PKG_EVENT = 5,
	TB_CFG_PKG_XDOMAIN_REQ = 6,
	TB_CFG_PKG_XDOMAIN_RESP = 7,
	TB_CFG_PKG_OVERRIDE = 8,
	TB_CFG_PKG_RESET = 9,
	TB_CFG_PKG_ICM_EVENT = 10,
	TB_CFG_PKG_ICM_CMD = 11,
	TB_CFG_PKG_ICM_RESP = 12,
	TB_CFG_PKG_PREPARE_TO_SLEEP = 13,
};
41
42
43
44
45
46
47
48
49
50
51
/*
 * enum tb_security_level - Thunderbolt security levels a domain can
 * operate in, stored in struct tb::security_level below.
 * NOTE(review): per-level semantics inferred from the names (no
 * security / user approval / secure key-based / DisplayPort-only /
 * USB-only tunneling) — confirm against
 * Documentation/admin-guide/thunderbolt.rst.
 */
enum tb_security_level {
	TB_SECURITY_NONE,
	TB_SECURITY_USER,
	TB_SECURITY_SECURE,
	TB_SECURITY_DPONLY,
	TB_SECURITY_USBONLY,
};
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75struct tb {
76 struct device dev;
77 struct mutex lock;
78 struct tb_nhi *nhi;
79 struct tb_ctl *ctl;
80 struct workqueue_struct *wq;
81 struct tb_switch *root_switch;
82 const struct tb_cm_ops *cm_ops;
83 int index;
84 enum tb_security_level security_level;
85 size_t nboot_acl;
86 unsigned long privdata[0];
87};
88
89extern struct bus_type tb_bus_type;
90extern struct device_type tb_service_type;
91extern struct device_type tb_xdomain_type;
92
#define TB_LINKS_PER_PHY_PORT 2

/*
 * tb_phy_port_from_link() - map a 1-based link number to its physical
 * port index. Each physical port carries TB_LINKS_PER_PHY_PORT links,
 * so links 1..2 map to port 0, links 3..4 to port 1, and so on.
 */
static inline unsigned int tb_phy_port_from_link(unsigned int link)
{
	unsigned int zero_based_link = link - 1;

	return zero_based_link / TB_LINKS_PER_PHY_PORT;
}
99
100
101
102
103
104
105
106
/**
 * struct tb_property_dir - XDomain property directory
 * @uuid: Directory UUID, may be %NULL (see tb_property_create_dir())
 * @properties: List of properties (struct tb_property, linked via
 *		their @list member) in this directory
 */
struct tb_property_dir {
	const uuid_t *uuid;
	struct list_head properties;
};
111
/*
 * enum tb_property_type - type of an XDomain property. The non-zero
 * values are the ASCII codes 'D', 'd', 't', 'v' — presumably the type
 * characters used in the on-wire property block format (confirm
 * against tb_property_parse_dir()/tb_property_format_dir()).
 */
enum tb_property_type {
	TB_PROPERTY_TYPE_UNKNOWN = 0x00,
	TB_PROPERTY_TYPE_DIRECTORY = 0x44,
	TB_PROPERTY_TYPE_DATA = 0x64,
	TB_PROPERTY_TYPE_TEXT = 0x74,
	TB_PROPERTY_TYPE_VALUE = 0x76,
};
119
120#define TB_PROPERTY_KEY_SIZE 8
121
122
123
124
125
126
127
128
129
130
131
/**
 * struct tb_property - XDomain property
 * @list: Links the property into its parent directory's @properties list
 * @key: Property key (NUL-terminated; at most %TB_PROPERTY_KEY_SIZE
 *	 characters plus the terminator)
 * @length: Length of the property payload (units not visible here —
 *	    presumably dwords of the property block; confirm)
 * @type: Selects which member of @value is meaningful:
 *	  %TB_PROPERTY_TYPE_DIRECTORY -> @value.dir,
 *	  %TB_PROPERTY_TYPE_DATA -> @value.data,
 *	  %TB_PROPERTY_TYPE_TEXT -> @value.text,
 *	  %TB_PROPERTY_TYPE_VALUE -> @value.immediate
 * @value: The property value
 */
struct tb_property {
	struct list_head list;
	char key[TB_PROPERTY_KEY_SIZE + 1];
	enum tb_property_type type;
	size_t length;
	union {
		struct tb_property_dir *dir;
		u8 *data;
		char *text;
		u32 immediate;
	} value;
};
144
145struct tb_property_dir *tb_property_parse_dir(const u32 *block,
146 size_t block_len);
147ssize_t tb_property_format_dir(const struct tb_property_dir *dir, u32 *block,
148 size_t block_len);
149struct tb_property_dir *tb_property_create_dir(const uuid_t *uuid);
150void tb_property_free_dir(struct tb_property_dir *dir);
151int tb_property_add_immediate(struct tb_property_dir *parent, const char *key,
152 u32 value);
153int tb_property_add_data(struct tb_property_dir *parent, const char *key,
154 const void *buf, size_t buflen);
155int tb_property_add_text(struct tb_property_dir *parent, const char *key,
156 const char *text);
157int tb_property_add_dir(struct tb_property_dir *parent, const char *key,
158 struct tb_property_dir *dir);
159void tb_property_remove(struct tb_property *tb_property);
160struct tb_property *tb_property_find(struct tb_property_dir *dir,
161 const char *key, enum tb_property_type type);
162struct tb_property *tb_property_get_next(struct tb_property_dir *dir,
163 struct tb_property *prev);
164
/**
 * tb_property_for_each() - iterate over all properties in a directory
 * @dir: Directory whose properties to walk
 * @property: &struct tb_property * used as the iteration cursor
 *
 * Walks every property via tb_property_get_next(). NOTE(review): the
 * cursor is passed back to tb_property_get_next() each step, so this is
 * presumably not safe against removing the current entry — confirm.
 */
#define tb_property_for_each(dir, property)			\
	for (property = tb_property_get_next(dir, NULL);	\
	     property;						\
	     property = tb_property_get_next(dir, property))
169
170int tb_register_property_dir(const char *key, struct tb_property_dir *dir);
171void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
/**
 * struct tb_xdomain - cross-domain (host-to-host) connection
 * @dev: XDomain device
 * @tb: Pointer to the domain the XDomain belongs to
 * @remote_uuid: UUID of the remote host
 * @local_uuid: Cached local UUID
 * @route: Route string the remote host can be reached through
 * @vendor: Vendor ID of the remote host
 * @device: Device ID of the remote host
 * @lock: Serializes access to fields of this structure (at minimum the
 *	  property-related ones — confirm exact coverage at the call
 *	  sites in the .c file)
 * @vendor_name: Name of the vendor (or %NULL if not known)
 * @device_name: Name of the device (or %NULL if not known)
 * @is_unplugged: The XDomain is unplugged
 * @resume: The XDomain is being resumed
 * @transmit_path: HopID used for transmitting packets
 * @transmit_ring: NHI ring used for transmitting packets
 * @receive_path: HopID used for receiving packets
 * @receive_ring: NHI ring used for receiving packets
 * @service_ids: Used to generate IDs for the services
 * @properties: Properties exported by the remote host
 * @property_block_gen: Generation of @properties
 * @get_properties_work: Work used to get remote host properties
 * @properties_retries: Retries left for reading remote properties
 * @properties_changed_work: Work used to notify the remote host that
 *			     our properties have changed
 * @properties_changed_retries: Retries left for the change notification
 * @link: Root switch link the remote host is connected to
 * @depth: Depth in the chain of the remote host
 */
struct tb_xdomain {
	struct device dev;
	struct tb *tb;
	uuid_t *remote_uuid;
	const uuid_t *local_uuid;
	u64 route;
	u16 vendor;
	u16 device;
	struct mutex lock;
	const char *vendor_name;
	const char *device_name;
	bool is_unplugged;
	bool resume;
	u16 transmit_path;
	u16 transmit_ring;
	u16 receive_path;
	u16 receive_ring;
	struct ida service_ids;
	struct tb_property_dir *properties;
	u32 property_block_gen;
	struct delayed_work get_properties_work;
	int properties_retries;
	struct delayed_work properties_changed_work;
	int properties_changed_retries;
	u8 link;
	u8 depth;
};
240
241int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path,
242 u16 transmit_ring, u16 receive_path,
243 u16 receive_ring);
244int tb_xdomain_disable_paths(struct tb_xdomain *xd);
245struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid);
246struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route);
247
248static inline struct tb_xdomain *
249tb_xdomain_find_by_uuid_locked(struct tb *tb, const uuid_t *uuid)
250{
251 struct tb_xdomain *xd;
252
253 mutex_lock(&tb->lock);
254 xd = tb_xdomain_find_by_uuid(tb, uuid);
255 mutex_unlock(&tb->lock);
256
257 return xd;
258}
259
260static inline struct tb_xdomain *
261tb_xdomain_find_by_route_locked(struct tb *tb, u64 route)
262{
263 struct tb_xdomain *xd;
264
265 mutex_lock(&tb->lock);
266 xd = tb_xdomain_find_by_route(tb, route);
267 mutex_unlock(&tb->lock);
268
269 return xd;
270}
271
272static inline struct tb_xdomain *tb_xdomain_get(struct tb_xdomain *xd)
273{
274 if (xd)
275 get_device(&xd->dev);
276 return xd;
277}
278
279static inline void tb_xdomain_put(struct tb_xdomain *xd)
280{
281 if (xd)
282 put_device(&xd->dev);
283}
284
285static inline bool tb_is_xdomain(const struct device *dev)
286{
287 return dev->type == &tb_xdomain_type;
288}
289
290static inline struct tb_xdomain *tb_to_xdomain(struct device *dev)
291{
292 if (tb_is_xdomain(dev))
293 return container_of(dev, struct tb_xdomain, dev);
294 return NULL;
295}
296
297int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
298 size_t size, enum tb_cfg_pkg_type type);
299int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
300 size_t request_size, enum tb_cfg_pkg_type request_type,
301 void *response, size_t response_size,
302 enum tb_cfg_pkg_type response_type,
303 unsigned int timeout_msec);
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
/**
 * struct tb_protocol_handler - Thunderbolt protocol handler
 * @uuid: XDomain messages with this UUID are dispatched to this handler
 * @callback: Callback called with the XDomain message payload;
 *	      the meaning of a non-zero return is not visible here —
 *	      confirm at the dispatch site
 * @data: Data passed to @callback
 * @list: Handlers are linked using this
 */
struct tb_protocol_handler {
	const uuid_t *uuid;
	int (*callback)(const void *buf, size_t size, void *data);
	void *data;
	struct list_head list;
};
329
330int tb_register_protocol_handler(struct tb_protocol_handler *handler);
331void tb_unregister_protocol_handler(struct tb_protocol_handler *handler);
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
/**
 * struct tb_service - Thunderbolt service
 * @dev: XDomain service device
 * @id: ID of the service (allocated from the parent XDomain's
 *	@service_ids — confirm)
 * @key: Protocol key of the service
 * @prtcid: Protocol ID of the service
 * @prtcvers: Protocol version of the service
 * @prtcrevs: Protocol revision of the service
 * @prtcstns: Protocol settings bitmask (semantics are
 *	      protocol-specific)
 */
struct tb_service {
	struct device dev;
	int id;
	const char *key;
	u32 prtcid;
	u32 prtcvers;
	u32 prtcrevs;
	u32 prtcstns;
};
356
357static inline struct tb_service *tb_service_get(struct tb_service *svc)
358{
359 if (svc)
360 get_device(&svc->dev);
361 return svc;
362}
363
364static inline void tb_service_put(struct tb_service *svc)
365{
366 if (svc)
367 put_device(&svc->dev);
368}
369
370static inline bool tb_is_service(const struct device *dev)
371{
372 return dev->type == &tb_service_type;
373}
374
375static inline struct tb_service *tb_to_service(struct device *dev)
376{
377 if (tb_is_service(dev))
378 return container_of(dev, struct tb_service, dev);
379 return NULL;
380}
381
382
383
384
385
386
387
388
389
/**
 * struct tb_service_driver - Thunderbolt service driver
 * @driver: Driver structure
 * @probe: Called when a matching service is found on the bus
 * @remove: Called when the service is removed
 * @shutdown: Called at shutdown time to stop the service
 * @id_table: Table of service identifiers the driver supports
 */
struct tb_service_driver {
	struct device_driver driver;
	int (*probe)(struct tb_service *svc, const struct tb_service_id *id);
	void (*remove)(struct tb_service *svc);
	void (*shutdown)(struct tb_service *svc);
	const struct tb_service_id *id_table;
};
397
/**
 * TB_SERVICE() - fill a &struct tb_service_id entry that matches on
 *		  protocol key and protocol ID only
 * @key: Protocol key to match
 * @id: Protocol ID to match
 */
#define TB_SERVICE(key, id)			\
	.match_flags = TBSVC_MATCH_PROTOCOL_KEY | \
		       TBSVC_MATCH_PROTOCOL_ID, \
	.protocol_key = (key),			\
	.protocol_id = (id)
403
404int tb_register_service_driver(struct tb_service_driver *drv);
405void tb_unregister_service_driver(struct tb_service_driver *drv);
406
407static inline void *tb_service_get_drvdata(const struct tb_service *svc)
408{
409 return dev_get_drvdata(&svc->dev);
410}
411
412static inline void tb_service_set_drvdata(struct tb_service *svc, void *data)
413{
414 dev_set_drvdata(&svc->dev, data);
415}
416
417static inline struct tb_xdomain *tb_service_parent(struct tb_service *svc)
418{
419 return tb_to_xdomain(svc->dev.parent);
420}
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
/**
 * struct tb_nhi - thunderbolt native host interface
 * @lock: Spinlock protecting NHI state (presumably ring
 *	  registration/teardown — confirm against nhi.c)
 * @pdev: Pointer to the PCI device
 * @iobase: MMIO space of the NHI
 * @tx_rings: Transmit rings, indexed by hop
 * @rx_rings: Receive rings, indexed by hop
 * @msix_ida: Used to allocate MSI-X vectors for the rings
 * @going_away: The device is about to disappear
 * @interrupt_work: Work scheduled to handle ring interrupts (used when
 *		    per-ring MSI-X vectors are not available — confirm)
 * @hop_count: Number of rings (hops) supported by the NHI
 */
struct tb_nhi {
	spinlock_t lock;
	struct pci_dev *pdev;
	void __iomem *iobase;
	struct tb_ring **tx_rings;
	struct tb_ring **rx_rings;
	struct ida msix_ida;
	bool going_away;
	struct work_struct interrupt_work;
	u32 hop_count;
};
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
/**
 * struct tb_ring - NHI TX or RX ring
 * @lock: Protects the ring's own state
 * @nhi: Pointer to the native host controller interface
 * @size: Size of the ring in number of descriptors
 * @hop: Hop (DMA channel) associated with this ring
 * @head: Index of the next descriptor to produce (confirm exact
 *	  producer/consumer roles against nhi.c)
 * @tail: Index of the next descriptor to complete
 * @descriptors: Hardware descriptor array
 * @descriptors_dma: DMA address of @descriptors
 * @queue: Frames queued but not yet handed to hardware
 * @in_flight: Frames currently owned by hardware
 * @work: Interrupt/completion work for this ring
 * @is_tx: True if this is a TX ring (see tb_ring_tx()/tb_ring_rx())
 * @running: True once the ring has been started
 * @irq: MSI-X irq number, or 0 if not using MSI-X
 * @vector: MSI-X vector number the ring uses
 * @flags: RING_FLAG_* flags the ring was created with
 * @sof_mask: Bitmask of start-of-frame PDF values allowed (RX rings)
 * @eof_mask: Bitmask of end-of-frame PDF values allowed (RX rings)
 * @start_poll: Called when ring interrupt is triggered to start
 *		polling, if set (see tb_ring_poll()); interrupts stay
 *		disabled until tb_ring_poll_complete() — confirm
 * @poll_data: Data passed to @start_poll
 */
struct tb_ring {
	spinlock_t lock;
	struct tb_nhi *nhi;
	int size;
	int hop;
	int head;
	int tail;
	struct ring_desc *descriptors;
	dma_addr_t descriptors_dma;
	struct list_head queue;
	struct list_head in_flight;
	struct work_struct work;
	bool is_tx:1;
	bool running:1;
	int irq;
	u8 vector;
	unsigned int flags;
	u16 sof_mask;
	u16 eof_mask;
	void (*start_poll)(void *data);
	void *poll_data;
};
495
496
/* Leave the ring active across suspend (inferred from name — confirm) */
#define RING_FLAG_NO_SUSPEND BIT(0)
/* Configure the ring to be in frame mode (inferred from name — confirm) */
#define RING_FLAG_FRAME BIT(1)
/* Enable end-to-end flow control (inferred from name — confirm) */
#define RING_FLAG_E2E BIT(2)
502
struct ring_frame;
/*
 * ring_cb - per-frame completion callback; invoked with the ring, the
 * finished frame, and @canceled set when the frame was not completed
 * normally (e.g. the ring was stopped — confirm at the call sites).
 */
typedef void (*ring_cb)(struct tb_ring *, struct ring_frame *, bool canceled);
505
506
507
508
509
510
511
512
513
514
/*
 * Flags in the hardware ring descriptor. Note that several names share
 * a bit: RING_DESC_ISOCH/RING_DESC_CRC_ERROR are both 0x1 and
 * RING_DESC_POSTED/RING_DESC_BUFFER_OVERRUN are both 0x4 — presumably
 * the interpretation depends on the transfer direction (TX vs RX);
 * confirm against the NHI hardware documentation.
 */
enum ring_desc_flags {
	RING_DESC_ISOCH = 0x1,
	RING_DESC_CRC_ERROR = 0x1,
	RING_DESC_COMPLETED = 0x2,
	RING_DESC_POSTED = 0x4,
	RING_DESC_BUFFER_OVERRUN = 0x04,
	RING_DESC_INTERRUPT = 0x8,
};
523
524
525
526
527
528
529
530
531
532
533
/**
 * struct ring_frame - for use with ring_rx/ring_tx
 * @buffer_phy: DMA mapped address of the frame buffer
 * @callback: Callback called when the frame is finished (may be %NULL —
 *	      confirm at the completion path)
 * @list: Frame is linked to a queue using this
 * @size: Size of the frame in bytes (12 bits)
 * @flags: Flags for the frame (RING_DESC_* values — confirm)
 * @eof: End-of-frame protocol defined field (4 bits)
 * @sof: Start-of-frame protocol defined field (4 bits)
 */
struct ring_frame {
	dma_addr_t buffer_phy;
	ring_cb callback;
	struct list_head list;
	u32 size:12;
	u32 flags:12;
	u32 eof:4;
	u32 sof:4;
};
543
544
545#define TB_FRAME_SIZE 0x100
546
547struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
548 unsigned int flags);
549struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
550 unsigned int flags, u16 sof_mask, u16 eof_mask,
551 void (*start_poll)(void *), void *poll_data);
552void tb_ring_start(struct tb_ring *ring);
553void tb_ring_stop(struct tb_ring *ring);
554void tb_ring_free(struct tb_ring *ring);
555
556int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame);
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
/**
 * tb_ring_rx() - enqueue a frame on an RX ring
 * @ring: Ring to enqueue the frame
 * @frame: Frame to enqueue
 *
 * @frame->buffer_phy must point to a DMA-mapped buffer (presumably at
 * least %TB_FRAME_SIZE bytes — confirm). Warns (without failing) if
 * called on a TX ring; the frame is enqueued regardless.
 *
 * Return: whatever __tb_ring_enqueue() returns (0 / -errno by kernel
 * convention — confirm in nhi.c).
 */
static inline int tb_ring_rx(struct tb_ring *ring, struct ring_frame *frame)
{
	WARN_ON(ring->is_tx);
	return __tb_ring_enqueue(ring, frame);
}
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
/**
 * tb_ring_tx() - enqueue a frame on a TX ring
 * @ring: Ring the frame should be enqueued to
 * @frame: Frame to enqueue
 *
 * @frame->buffer_phy, @frame->size, @frame->eof and @frame->sof are
 * presumably expected to be filled in by the caller — confirm. Warns
 * (without failing) if called on an RX ring; the frame is enqueued
 * regardless.
 *
 * Return: whatever __tb_ring_enqueue() returns (0 / -errno by kernel
 * convention — confirm in nhi.c).
 */
static inline int tb_ring_tx(struct tb_ring *ring, struct ring_frame *frame)
{
	WARN_ON(!ring->is_tx);
	return __tb_ring_enqueue(ring, frame);
}
600
601
602struct ring_frame *tb_ring_poll(struct tb_ring *ring);
603void tb_ring_poll_complete(struct tb_ring *ring);
604
605
606
607
608
609
610
611
612static inline struct device *tb_ring_dma_device(struct tb_ring *ring)
613{
614 return &ring->nhi->pdev->dev;
615}
616
617#endif
618