/*
 * Networking over Thunderbolt cable using Apple ThunderboltIP protocol
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Amir Levy <amir.jer.levy@intel.com>
 *          Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */
#include <linux/atomic.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/sizes.h>
#include <linux/thunderbolt.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

#include <net/ip6_checksum.h>

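/* Protocol timeouts in ms */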
#define TBNET_LOGIN_DELAY	4500
#define TBNET_LOGIN_TIMEOUT	500
#define TBNET_LOGOUT_TIMEOUT	100

#define TBNET_RING_SIZE		256
#define TBNET_LOCAL_PATH	0xf
#define TBNET_LOGIN_RETRIES	60
#define TBNET_LOGOUT_RETRIES	5
#define TBNET_MATCH_FRAGS_ID	BIT(1)
#define TBNET_MAX_MTU		SZ_64K
#define TBNET_FRAME_SIZE	SZ_4K
#define TBNET_MAX_PAYLOAD_SIZE	\
	(TBNET_FRAME_SIZE - sizeof(struct thunderbolt_ip_frame_header))

#define TBNET_RX_MAX_SIZE	\
	(TBNET_FRAME_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define TBNET_RX_PAGE_ORDER	get_order(TBNET_RX_MAX_SIZE)
#define TBNET_RX_PAGE_SIZE	(PAGE_SIZE << TBNET_RX_PAGE_ORDER)

#define TBNET_L0_PORT_NUM(route) ((route) & GENMASK(5, 0))

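/**
 * struct thunderbolt_ip_frame_header - Header for each Thunderbolt frame
 * @frame_size: Size of the data carried by the frame
 * @frame_index: Running index of the frames within a packet
 * @frame_id: ID used to match frames to a specific packet
 * @frame_count: How many frames make up a full packet
 *
 * Each data frame passed to the high-speed DMA ring starts with this
 * header. @frame_id is only advanced when %TBNET_MATCH_FRAGS_ID is
 * announced in the XDomain network directory.
 */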
struct thunderbolt_ip_frame_header {
	u32 frame_size;
	u16 frame_index;
	u16 frame_id;
	u32 frame_count;
};

enum thunderbolt_ip_frame_pdf {
	TBIP_PDF_FRAME_START = 1,
	TBIP_PDF_FRAME_END,
};

enum thunderbolt_ip_type {
	TBIP_LOGIN,
	TBIP_LOGIN_RESPONSE,
	TBIP_LOGOUT,
	TBIP_STATUS,
};

struct thunderbolt_ip_header {
	u32 route_hi;
	u32 route_lo;
	u32 length_sn;
	uuid_t uuid;
	uuid_t initiator_uuid;
	uuid_t target_uuid;
	u32 type;
	u32 command_id;
};

#define TBIP_HDR_LENGTH_MASK		GENMASK(5, 0)
#define TBIP_HDR_SN_MASK		GENMASK(28, 27)
#define TBIP_HDR_SN_SHIFT		27

struct thunderbolt_ip_login {
	struct thunderbolt_ip_header hdr;
	u32 proto_version;
	u32 transmit_path;
	u32 reserved[4];
};

#define TBIP_LOGIN_PROTO_VERSION	1

struct thunderbolt_ip_login_response {
	struct thunderbolt_ip_header hdr;
	u32 status;
	u32 receiver_mac[2];
	u32 receiver_mac_len;
	u32 reserved[4];
};

struct thunderbolt_ip_logout {
	struct thunderbolt_ip_header hdr;
};

struct thunderbolt_ip_status {
	struct thunderbolt_ip_header hdr;
	u32 status;
};

struct tbnet_stats {
	u64 tx_packets;
	u64 rx_packets;
	u64 tx_bytes;
	u64 rx_bytes;
	u64 rx_errors;
	u64 tx_errors;
	u64 rx_length_errors;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_missed_errors;
};

struct tbnet_frame {
	struct net_device *dev;
	struct page *page;
	struct ring_frame frame;
};

struct tbnet_ring {
	struct tbnet_frame frames[TBNET_RING_SIZE];
	unsigned int cons;
	unsigned int prod;
	struct tb_ring *ring;
};

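/**
 * struct tbnet - ThunderboltIP network driver private data
 * @svc: XDomain service the driver is bound to
 * @xd: XDomain the service belongs to
 * @handler: ThunderboltIP configuration protocol handler
 * @dev: Networking device
 * @napi: NAPI structure for Rx polling
 * @stats: Network statistics
 * @skb: Network packet that is currently being assembled from Rx frames
 * @command_id: ID used for the next configuration protocol packet
 * @login_sent: ThunderboltIP login message successfully sent
 * @login_received: ThunderboltIP login message received from the remote host
 * @transmit_path: HopID the remote end announced in its login packet; used
 *		   when enabling the transmit DMA path
 * @connection_lock: Serializes access to @login_sent, @login_received and
 *		     @transmit_path
 * @login_retries: Number of login retries currently done
 * @login_work: Worker to send ThunderboltIP login packets
 * @connected_work: Worker that finalizes the ThunderboltIP connection
 *		    setup and enables DMA paths for high speed data transfers
 * @rx_hdr: Copy of the currently processed Rx frame header
 * @rx_ring: Software ring holding Rx frames
 * @frame_id: Frame ID used for the next Tx packet
 *	      (only advanced when %TBNET_MATCH_FRAGS_ID is supported)
 * @tx_ring: Software ring holding Tx frames
 */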
struct tbnet {
	const struct tb_service *svc;
	struct tb_xdomain *xd;
	struct tb_protocol_handler handler;
	struct net_device *dev;
	struct napi_struct napi;
	struct tbnet_stats stats;
	struct sk_buff *skb;
	atomic_t command_id;
	bool login_sent;
	bool login_received;
	u32 transmit_path;
	struct mutex connection_lock;
	int login_retries;
	struct delayed_work login_work;
	struct work_struct connected_work;
	struct thunderbolt_ip_frame_header rx_hdr;
	struct tbnet_ring rx_ring;
	atomic_t frame_id;
	struct tbnet_ring tx_ring;
};

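/* Network property directory UUID: c66189ca-1cce-4195-bdb8-49592e5f5a4f */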
static const uuid_t tbnet_dir_uuid =
	UUID_INIT(0xc66189ca, 0x1cce, 0x4195,
		  0xbd, 0xb8, 0x49, 0x59, 0x2e, 0x5f, 0x5a, 0x4f);

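/* ThunderboltIP protocol UUID: 798f589e-3616-8a47-97c6-5664a920c8dd */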
static const uuid_t tbnet_svc_uuid =
	UUID_INIT(0x798f589e, 0x3616, 0x8a47,
		  0x97, 0xc6, 0x56, 0x64, 0xa9, 0x20, 0xc8, 0xdd);

static struct tb_property_dir *tbnet_dir;

static void tbnet_fill_header(struct thunderbolt_ip_header *hdr, u64 route,
	u8 sequence, const uuid_t *initiator_uuid, const uuid_t *target_uuid,
	enum thunderbolt_ip_type type, size_t size, u32 command_id)
{
	u32 length_sn;

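	/* Length does not include route_hi/lo and length_sn fields */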
	length_sn = (size - 3 * 4) / 4;
	length_sn |= (sequence << TBIP_HDR_SN_SHIFT) & TBIP_HDR_SN_MASK;

	hdr->route_hi = upper_32_bits(route);
	hdr->route_lo = lower_32_bits(route);
	hdr->length_sn = length_sn;
	uuid_copy(&hdr->uuid, &tbnet_svc_uuid);
	uuid_copy(&hdr->initiator_uuid, initiator_uuid);
	uuid_copy(&hdr->target_uuid, target_uuid);
	hdr->type = type;
	hdr->command_id = command_id;
}

static int tbnet_login_response(struct tbnet *net, u64 route, u8 sequence,
				u32 command_id)
{
	struct thunderbolt_ip_login_response reply;
	struct tb_xdomain *xd = net->xd;

	memset(&reply, 0, sizeof(reply));
	tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid,
			  xd->remote_uuid, TBIP_LOGIN_RESPONSE, sizeof(reply),
			  command_id);
	memcpy(reply.receiver_mac, net->dev->dev_addr, ETH_ALEN);
	reply.receiver_mac_len = ETH_ALEN;

	return tb_xdomain_response(xd, &reply, sizeof(reply),
				   TB_CFG_PKG_XDOMAIN_RESP);
}

static int tbnet_login_request(struct tbnet *net, u8 sequence)
{
	struct thunderbolt_ip_login_response reply;
	struct thunderbolt_ip_login request;
	struct tb_xdomain *xd = net->xd;

	memset(&request, 0, sizeof(request));
	tbnet_fill_header(&request.hdr, xd->route, sequence, xd->local_uuid,
			  xd->remote_uuid, TBIP_LOGIN, sizeof(request),
			  atomic_inc_return(&net->command_id));

	request.proto_version = TBIP_LOGIN_PROTO_VERSION;
	request.transmit_path = TBNET_LOCAL_PATH;

	return tb_xdomain_request(xd, &request, sizeof(request),
				  TB_CFG_PKG_XDOMAIN_RESP, &reply,
				  sizeof(reply), TB_CFG_PKG_XDOMAIN_RESP,
				  TBNET_LOGIN_TIMEOUT);
}

static int tbnet_logout_response(struct tbnet *net, u64 route, u8 sequence,
				 u32 command_id)
{
	struct thunderbolt_ip_status reply;
	struct tb_xdomain *xd = net->xd;

	memset(&reply, 0, sizeof(reply));
	tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid,
			  xd->remote_uuid, TBIP_STATUS, sizeof(reply),
			  atomic_inc_return(&net->command_id));
	return tb_xdomain_response(xd, &reply, sizeof(reply),
				   TB_CFG_PKG_XDOMAIN_RESP);
}

static int tbnet_logout_request(struct tbnet *net)
{
	struct thunderbolt_ip_logout request;
	struct thunderbolt_ip_status reply;
	struct tb_xdomain *xd = net->xd;

	memset(&request, 0, sizeof(request));
	tbnet_fill_header(&request.hdr, xd->route, 0, xd->local_uuid,
			  xd->remote_uuid, TBIP_LOGOUT, sizeof(request),
			  atomic_inc_return(&net->command_id));

	return tb_xdomain_request(xd, &request, sizeof(request),
				  TB_CFG_PKG_XDOMAIN_RESP, &reply,
				  sizeof(reply), TB_CFG_PKG_XDOMAIN_RESP,
				  TBNET_LOGOUT_TIMEOUT);
}

static void start_login(struct tbnet *net)
{
	mutex_lock(&net->connection_lock);
	net->login_sent = false;
	net->login_received = false;
	mutex_unlock(&net->connection_lock);

	queue_delayed_work(system_long_wq, &net->login_work,
			   msecs_to_jiffies(1000));
}

static void stop_login(struct tbnet *net)
{
	cancel_delayed_work_sync(&net->login_work);
	cancel_work_sync(&net->connected_work);
}

static inline unsigned int tbnet_frame_size(const struct tbnet_frame *tf)
{
	return tf->frame.size ? : TBNET_FRAME_SIZE;
}

static void tbnet_free_buffers(struct tbnet_ring *ring)
{
	unsigned int i;

	for (i = 0; i < TBNET_RING_SIZE; i++) {
		struct device *dma_dev = tb_ring_dma_device(ring->ring);
		struct tbnet_frame *tf = &ring->frames[i];
		enum dma_data_direction dir;
		unsigned int order;
		size_t size;

		if (!tf->page)
			continue;

		if (ring->ring->is_tx) {
			dir = DMA_TO_DEVICE;
			order = 0;
			size = TBNET_FRAME_SIZE;
		} else {
			dir = DMA_FROM_DEVICE;
			order = TBNET_RX_PAGE_ORDER;
			size = TBNET_RX_PAGE_SIZE;
		}

		if (tf->frame.buffer_phy)
			dma_unmap_page(dma_dev, tf->frame.buffer_phy, size,
				       dir);

		__free_pages(tf->page, order);
		tf->page = NULL;
	}

	ring->cons = 0;
	ring->prod = 0;
}

static void tbnet_tear_down(struct tbnet *net, bool send_logout)
{
	netif_carrier_off(net->dev);
	netif_stop_queue(net->dev);

	stop_login(net);

	mutex_lock(&net->connection_lock);

	if (net->login_sent && net->login_received) {
		int retries = TBNET_LOGOUT_RETRIES;

		while (send_logout && retries-- > 0) {
			int ret = tbnet_logout_request(net);
			if (ret != -ETIMEDOUT)
				break;
		}

		tb_ring_stop(net->rx_ring.ring);
		tb_ring_stop(net->tx_ring.ring);
		tbnet_free_buffers(&net->rx_ring);
		tbnet_free_buffers(&net->tx_ring);

		if (tb_xdomain_disable_paths(net->xd))
			netdev_warn(net->dev, "failed to disable DMA paths\n");
	}

	net->login_retries = 0;
	net->login_sent = false;
	net->login_received = false;

	mutex_unlock(&net->connection_lock);
}

static int tbnet_handle_packet(const void *buf, size_t size, void *data)
{
	const struct thunderbolt_ip_login *pkg = buf;
	struct tbnet *net = data;
	u32 command_id;
	int ret = 0;
	u32 sequence;
	u64 route;

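	/* Make sure the packet is for us */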
	if (size < sizeof(struct thunderbolt_ip_header))
		return 0;
	if (!uuid_equal(&pkg->hdr.initiator_uuid, net->xd->remote_uuid))
		return 0;
	if (!uuid_equal(&pkg->hdr.target_uuid, net->xd->local_uuid))
		return 0;

	route = ((u64)pkg->hdr.route_hi << 32) | pkg->hdr.route_lo;
	route &= ~BIT_ULL(63);
	if (route != net->xd->route)
		return 0;

	sequence = pkg->hdr.length_sn & TBIP_HDR_SN_MASK;
	sequence >>= TBIP_HDR_SN_SHIFT;
	command_id = pkg->hdr.command_id;

	switch (pkg->hdr.type) {
	case TBIP_LOGIN:
		if (!netif_running(net->dev))
			break;

		ret = tbnet_login_response(net, route, sequence,
					   pkg->hdr.command_id);
		if (!ret) {
			mutex_lock(&net->connection_lock);
			net->login_received = true;
			net->transmit_path = pkg->transmit_path;

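			/*
			 * If we have exhausted the login retries or have
			 * not sent our own login yet (e.g. after a logout),
			 * schedule a new round of login attempts.
			 */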
			if (net->login_retries >= TBNET_LOGIN_RETRIES ||
			    !net->login_sent) {
				net->login_retries = 0;
				queue_delayed_work(system_long_wq,
						   &net->login_work, 0);
			}
			mutex_unlock(&net->connection_lock);

			queue_work(system_long_wq, &net->connected_work);
		}
		break;

	case TBIP_LOGOUT:
		ret = tbnet_logout_response(net, route, sequence, command_id);
		if (!ret)
			tbnet_tear_down(net, false);
		break;

	default:
		return 0;
	}

	if (ret)
		netdev_warn(net->dev, "failed to send ThunderboltIP response\n");

	return 1;
}

static unsigned int tbnet_available_buffers(const struct tbnet_ring *ring)
{
	return ring->prod - ring->cons;
}

static int tbnet_alloc_rx_buffers(struct tbnet *net, unsigned int nbuffers)
{
	struct tbnet_ring *ring = &net->rx_ring;
	int ret;

	while (nbuffers--) {
		struct device *dma_dev = tb_ring_dma_device(ring->ring);
		unsigned int index = ring->prod & (TBNET_RING_SIZE - 1);
		struct tbnet_frame *tf = &ring->frames[index];
		dma_addr_t dma_addr;

		if (tf->page)
			break;

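		/* Allocate page (order > 0) so that it can hold maximum
		 * ThunderboltIP frame (4kB) and the additional room for
		 * SKB shared info required by build_skb().
		 */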
		tf->page = dev_alloc_pages(TBNET_RX_PAGE_ORDER);
		if (!tf->page) {
			ret = -ENOMEM;
			goto err_free;
		}

		dma_addr = dma_map_page(dma_dev, tf->page, 0,
					TBNET_RX_PAGE_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(dma_dev, dma_addr)) {
			ret = -ENOMEM;
			goto err_free;
		}

		tf->frame.buffer_phy = dma_addr;
		tf->dev = net->dev;

		tb_ring_rx(ring->ring, &tf->frame);

		ring->prod++;
	}

	return 0;

err_free:
	tbnet_free_buffers(ring);
	return ret;
}

static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net)
{
	struct tbnet_ring *ring = &net->tx_ring;
	struct device *dma_dev = tb_ring_dma_device(ring->ring);
	struct tbnet_frame *tf;
	unsigned int index;

	if (!tbnet_available_buffers(ring))
		return NULL;

	index = ring->cons++ & (TBNET_RING_SIZE - 1);

	tf = &ring->frames[index];
	tf->frame.size = 0;

	dma_sync_single_for_cpu(dma_dev, tf->frame.buffer_phy,
				tbnet_frame_size(tf), DMA_TO_DEVICE);

	return tf;
}

static void tbnet_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
			      bool canceled)
{
	struct tbnet_frame *tf = container_of(frame, typeof(*tf), frame);
	struct tbnet *net = netdev_priv(tf->dev);

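	/* Return buffer to the ring */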
	net->tx_ring.prod++;

	if (tbnet_available_buffers(&net->tx_ring) >= TBNET_RING_SIZE / 2)
		netif_wake_queue(net->dev);
}

static int tbnet_alloc_tx_buffers(struct tbnet *net)
{
	struct tbnet_ring *ring = &net->tx_ring;
	struct device *dma_dev = tb_ring_dma_device(ring->ring);
	unsigned int i;

	for (i = 0; i < TBNET_RING_SIZE; i++) {
		struct tbnet_frame *tf = &ring->frames[i];
		dma_addr_t dma_addr;

		tf->page = alloc_page(GFP_KERNEL);
		if (!tf->page) {
			tbnet_free_buffers(ring);
			return -ENOMEM;
		}

		dma_addr = dma_map_page(dma_dev, tf->page, 0, TBNET_FRAME_SIZE,
					DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, dma_addr)) {
			__free_page(tf->page);
			tf->page = NULL;
			tbnet_free_buffers(ring);
			return -ENOMEM;
		}

		tf->dev = net->dev;
		tf->frame.buffer_phy = dma_addr;
		tf->frame.callback = tbnet_tx_callback;
		tf->frame.sof = TBIP_PDF_FRAME_START;
		tf->frame.eof = TBIP_PDF_FRAME_END;
	}

	ring->cons = 0;
	ring->prod = TBNET_RING_SIZE - 1;

	return 0;
}

static void tbnet_connected_work(struct work_struct *work)
{
	struct tbnet *net = container_of(work, typeof(*net), connected_work);
	bool connected;
	int ret;

	if (netif_carrier_ok(net->dev))
		return;

	mutex_lock(&net->connection_lock);
	connected = net->login_sent && net->login_received;
	mutex_unlock(&net->connection_lock);

	if (!connected)
		return;

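	/* Both logins successful so enable the high-speed DMA paths and
	 * start the network device queue.
	 */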
	ret = tb_xdomain_enable_paths(net->xd, TBNET_LOCAL_PATH,
				      net->rx_ring.ring->hop,
				      net->transmit_path,
				      net->tx_ring.ring->hop);
	if (ret) {
		netdev_err(net->dev, "failed to enable DMA paths\n");
		return;
	}

	tb_ring_start(net->tx_ring.ring);
	tb_ring_start(net->rx_ring.ring);

	ret = tbnet_alloc_rx_buffers(net, TBNET_RING_SIZE);
	if (ret)
		goto err_stop_rings;

	ret = tbnet_alloc_tx_buffers(net);
	if (ret)
		goto err_free_rx_buffers;

	netif_carrier_on(net->dev);
	netif_start_queue(net->dev);
	return;

err_free_rx_buffers:
	tbnet_free_buffers(&net->rx_ring);
err_stop_rings:
	tb_ring_stop(net->rx_ring.ring);
	tb_ring_stop(net->tx_ring.ring);
}

static void tbnet_login_work(struct work_struct *work)
{
	struct tbnet *net = container_of(work, typeof(*net), login_work.work);
	unsigned long delay = msecs_to_jiffies(TBNET_LOGIN_DELAY);
	int ret;

	if (netif_carrier_ok(net->dev))
		return;

	ret = tbnet_login_request(net, net->login_retries % 4);
	if (ret) {
		if (net->login_retries++ < TBNET_LOGIN_RETRIES) {
			queue_delayed_work(system_long_wq, &net->login_work,
					   delay);
		} else {
			netdev_info(net->dev, "ThunderboltIP login timed out\n");
		}
	} else {
		net->login_retries = 0;

		mutex_lock(&net->connection_lock);
		net->login_sent = true;
		mutex_unlock(&net->connection_lock);

		queue_work(system_long_wq, &net->connected_work);
	}
}

static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf,
			      const struct thunderbolt_ip_frame_header *hdr)
{
	u32 frame_id, frame_count, frame_size, frame_index;
	unsigned int size;

	if (tf->frame.flags & RING_DESC_CRC_ERROR) {
		net->stats.rx_crc_errors++;
		return false;
	} else if (tf->frame.flags & RING_DESC_BUFFER_OVERRUN) {
		net->stats.rx_over_errors++;
		return false;
	}

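	/* Should be greater than just header i.e. contains data */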
	size = tbnet_frame_size(tf);
	if (size <= sizeof(*hdr)) {
		net->stats.rx_length_errors++;
		return false;
	}

	frame_count = le32_to_cpu(hdr->frame_count);
	frame_size = le32_to_cpu(hdr->frame_size);
	frame_index = le16_to_cpu(hdr->frame_index);
	frame_id = le16_to_cpu(hdr->frame_id);

	if ((frame_size > size - sizeof(*hdr)) || !frame_size) {
		net->stats.rx_length_errors++;
		return false;
	}

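	/* In case we're in the middle of a packet, validate the frame
	 * header based on the first fragment of the packet.
	 */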
	if (net->skb && net->rx_hdr.frame_count) {
		if (frame_count != net->rx_hdr.frame_count) {
			net->stats.rx_length_errors++;
			return false;
		}

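		/* Check the frame identifiers are incremented correctly,
		 * and id is matching.
		 */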
		if (frame_index != net->rx_hdr.frame_index + 1 ||
		    frame_id != net->rx_hdr.frame_id) {
			net->stats.rx_missed_errors++;
			return false;
		}

		if (net->skb->len + frame_size > TBNET_MAX_MTU) {
			net->stats.rx_length_errors++;
			return false;
		}

		return true;
	}

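	/* Start of packet, validate the frame header */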
	if (frame_count == 0 || frame_count > TBNET_RING_SIZE / 4) {
		net->stats.rx_length_errors++;
		return false;
	}
	if (frame_index != 0) {
		net->stats.rx_missed_errors++;
		return false;
	}

	return true;
}

static int tbnet_poll(struct napi_struct *napi, int budget)
{
	struct tbnet *net = container_of(napi, struct tbnet, napi);
	unsigned int cleaned_count = tbnet_available_buffers(&net->rx_ring);
	struct device *dma_dev = tb_ring_dma_device(net->rx_ring.ring);
	unsigned int rx_packets = 0;

	while (rx_packets < budget) {
		const struct thunderbolt_ip_frame_header *hdr;
		unsigned int hdr_size = sizeof(*hdr);
		struct sk_buff *skb = NULL;
		struct ring_frame *frame;
		struct tbnet_frame *tf;
		struct page *page;
		bool last = true;
		u32 frame_size;

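		/* Return some buffers to hardware, one at a time is too
		 * slow so allocate MAX_SKB_FRAGS buffers at the same
		 * time.
		 */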
		if (cleaned_count >= MAX_SKB_FRAGS) {
			tbnet_alloc_rx_buffers(net, cleaned_count);
			cleaned_count = 0;
		}

		frame = tb_ring_poll(net->rx_ring.ring);
		if (!frame)
			break;

		dma_unmap_page(dma_dev, frame->buffer_phy,
			       TBNET_RX_PAGE_SIZE, DMA_FROM_DEVICE);

		tf = container_of(frame, typeof(*tf), frame);

		page = tf->page;
		tf->page = NULL;
		net->rx_ring.cons++;
		cleaned_count++;

		hdr = page_address(page);
		if (!tbnet_check_frame(net, tf, hdr)) {
			__free_pages(page, TBNET_RX_PAGE_ORDER);
			dev_kfree_skb_any(net->skb);
			net->skb = NULL;
			continue;
		}

		frame_size = le32_to_cpu(hdr->frame_size);

		skb = net->skb;
		if (!skb) {
			skb = build_skb(page_address(page),
					TBNET_RX_PAGE_SIZE);
			if (!skb) {
				__free_pages(page, TBNET_RX_PAGE_ORDER);
				net->stats.rx_errors++;
				break;
			}

			skb_reserve(skb, hdr_size);
			skb_put(skb, frame_size);

			net->skb = skb;
		} else {
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					page, hdr_size, frame_size,
					TBNET_RX_PAGE_SIZE - hdr_size);
		}

		net->rx_hdr.frame_size = frame_size;
		net->rx_hdr.frame_count = le32_to_cpu(hdr->frame_count);
		net->rx_hdr.frame_index = le16_to_cpu(hdr->frame_index);
		net->rx_hdr.frame_id = le16_to_cpu(hdr->frame_id);
		last = net->rx_hdr.frame_index == net->rx_hdr.frame_count - 1;

		rx_packets++;
		net->stats.rx_bytes += frame_size;

		if (last) {
			skb->protocol = eth_type_trans(skb, net->dev);
			napi_gro_receive(&net->napi, skb);
			net->skb = NULL;
		}
	}

	net->stats.rx_packets += rx_packets;

	if (cleaned_count)
		tbnet_alloc_rx_buffers(net, cleaned_count);

	if (rx_packets >= budget)
		return budget;

	napi_complete_done(napi, rx_packets);

	tb_ring_poll_complete(net->rx_ring.ring);

	return rx_packets;
}

static void tbnet_start_poll(void *data)
{
	struct tbnet *net = data;

	napi_schedule(&net->napi);
}

static int tbnet_open(struct net_device *dev)
{
	struct tbnet *net = netdev_priv(dev);
	struct tb_xdomain *xd = net->xd;
	u16 sof_mask, eof_mask;
	struct tb_ring *ring;

	netif_carrier_off(dev);

	ring = tb_ring_alloc_tx(xd->tb->nhi, -1, TBNET_RING_SIZE,
				RING_FLAG_FRAME);
	if (!ring) {
		netdev_err(dev, "failed to allocate Tx ring\n");
		return -ENOMEM;
	}
	net->tx_ring.ring = ring;

	sof_mask = BIT(TBIP_PDF_FRAME_START);
	eof_mask = BIT(TBIP_PDF_FRAME_END);

	ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE,
				RING_FLAG_FRAME | RING_FLAG_E2E, sof_mask,
				eof_mask, tbnet_start_poll, net);
	if (!ring) {
		netdev_err(dev, "failed to allocate Rx ring\n");
		tb_ring_free(net->tx_ring.ring);
		net->tx_ring.ring = NULL;
		return -ENOMEM;
	}
	net->rx_ring.ring = ring;

	napi_enable(&net->napi);
	start_login(net);

	return 0;
}

static int tbnet_stop(struct net_device *dev)
{
	struct tbnet *net = netdev_priv(dev);

	napi_disable(&net->napi);

	tbnet_tear_down(net, true);

	tb_ring_free(net->rx_ring.ring);
	net->rx_ring.ring = NULL;
	tb_ring_free(net->tx_ring.ring);
	net->tx_ring.ring = NULL;

	return 0;
}

static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
	struct tbnet_frame **frames, u32 frame_count)
{
	struct thunderbolt_ip_frame_header *hdr = page_address(frames[0]->page);
	struct device *dma_dev = tb_ring_dma_device(net->tx_ring.ring);
	__wsum wsum = htonl(skb->len - skb_transport_offset(skb));
	unsigned int i, len, offset = skb_transport_offset(skb);
	__be16 protocol = skb->protocol;
	void *data = skb->data;
	void *dest = hdr + 1;
	__sum16 *tucso;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
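		/* No need to calculate checksum so we just update the
		 * total frame count and sync the frames for DMA.
		 */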
		for (i = 0; i < frame_count; i++) {
			hdr = page_address(frames[i]->page);
			hdr->frame_count = cpu_to_le32(frame_count);
			dma_sync_single_for_device(dma_dev,
				frames[i]->frame.buffer_phy,
				tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
		}

		return true;
	}

	if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, vh;

		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(vh), &vh);
		if (!vhdr)
			return false;

		protocol = vhdr->h_vlan_encapsulated_proto;
	}

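	/* The data pointer points to the beginning of the packet.
	 * Locate the absolute position of the checksum fields:
	 * ipcso updates the IP checksum, tucso the TCP/UDP checksum.
	 */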
	if (protocol == htons(ETH_P_IP)) {
		__sum16 *ipcso = dest + ((void *)&(ip_hdr(skb)->check) - data);

		*ipcso = 0;
		*ipcso = ip_fast_csum(dest + skb_network_offset(skb),
				      ip_hdr(skb)->ihl);

		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			tucso = dest + ((void *)&(udp_hdr(skb)->check) - data);
		else
			return false;

		*tucso = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					    ip_hdr(skb)->daddr, 0,
					    ip_hdr(skb)->protocol, 0);
	} else if (skb_is_gso_v6(skb)) {
		tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
		*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					  &ipv6_hdr(skb)->daddr, 0,
					  IPPROTO_TCP, 0);
		return false;
	} else if (protocol == htons(ETH_P_IPV6)) {
		tucso = dest + skb_checksum_start_offset(skb) +
			skb->csum_offset;
		*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					  &ipv6_hdr(skb)->daddr, 0,
					  ipv6_hdr(skb)->nexthdr, 0);
	} else {
		return false;
	}

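	/* First frame was headers, rest of the frames contain data.
	 * Calculate checksum over each frame.
	 */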
	for (i = 0; i < frame_count; i++) {
		hdr = page_address(frames[i]->page);
		dest = (void *)(hdr + 1) + offset;
		len = le32_to_cpu(hdr->frame_size) - offset;
		wsum = csum_partial(dest, len, wsum);
		hdr->frame_count = cpu_to_le32(frame_count);

		offset = 0;
	}

	*tucso = csum_fold(wsum);

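	/* Checksum is finally calculated and we don't touch the memory
	 * anymore, so DMA sync the frames now.
	 */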
	for (i = 0; i < frame_count; i++) {
		dma_sync_single_for_device(dma_dev, frames[i]->frame.buffer_phy,
			tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
	}

	return true;
}

static void *tbnet_kmap_frag(struct sk_buff *skb, unsigned int frag_num,
			     unsigned int *len)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num];

	*len = skb_frag_size(frag);
	return kmap_atomic(skb_frag_page(frag)) + frag->page_offset;
}

static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct tbnet *net = netdev_priv(dev);
	struct tbnet_frame *frames[MAX_SKB_FRAGS];
	u16 frame_id = atomic_read(&net->frame_id);
	struct thunderbolt_ip_frame_header *hdr;
	unsigned int len = skb_headlen(skb);
	unsigned int data_len = skb->len;
	unsigned int nframes, i;
	unsigned int frag = 0;
	void *src = skb->data;
	u32 frame_index = 0;
	bool unmap = false;
	void *dest;

	nframes = DIV_ROUND_UP(data_len, TBNET_MAX_PAYLOAD_SIZE);
	if (tbnet_available_buffers(&net->tx_ring) < nframes) {
		netif_stop_queue(net->dev);
		return NETDEV_TX_BUSY;
	}

	frames[frame_index] = tbnet_get_tx_buffer(net);
	if (!frames[frame_index])
		goto err_drop;

	hdr = page_address(frames[frame_index]->page);
	dest = hdr + 1;

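	/* If overall packet is bigger than the frame data size */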
	while (data_len > TBNET_MAX_PAYLOAD_SIZE) {
		unsigned int size_left = TBNET_MAX_PAYLOAD_SIZE;

		hdr->frame_size = cpu_to_le32(TBNET_MAX_PAYLOAD_SIZE);
		hdr->frame_index = cpu_to_le16(frame_index);
		hdr->frame_id = cpu_to_le16(frame_id);

		do {
			if (len > size_left) {
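				/* Copy data onto Tx buffer data with
				 * full frame size then break and go to
				 * next frame
				 */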
				memcpy(dest, src, size_left);
				len -= size_left;
				dest += size_left;
				src += size_left;
				break;
			}

			memcpy(dest, src, len);
			size_left -= len;
			dest += len;

			if (unmap) {
				kunmap_atomic(src);
				unmap = false;
			}

			if (frag < skb_shinfo(skb)->nr_frags) {
				src = tbnet_kmap_frag(skb, frag++, &len);
				unmap = true;
			} else if (unlikely(size_left > 0)) {
				goto err_drop;
			}
		} while (size_left > 0);

		data_len -= TBNET_MAX_PAYLOAD_SIZE;
		frame_index++;

		frames[frame_index] = tbnet_get_tx_buffer(net);
		if (!frames[frame_index])
			goto err_drop;

		hdr = page_address(frames[frame_index]->page);
		dest = hdr + 1;
	}

	hdr->frame_size = cpu_to_le32(data_len);
	hdr->frame_index = cpu_to_le16(frame_index);
	hdr->frame_id = cpu_to_le16(frame_id);

	frames[frame_index]->frame.size = data_len + sizeof(*hdr);

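	/* Copy the remaining data into the last frame, mapping further
	 * skb fragments as needed.
	 */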
	while (len < data_len) {
		memcpy(dest, src, len);
		data_len -= len;
		dest += len;

		if (unmap) {
			kunmap_atomic(src);
			unmap = false;
		}

		if (frag < skb_shinfo(skb)->nr_frags) {
			src = tbnet_kmap_frag(skb, frag++, &len);
			unmap = true;
		} else if (unlikely(data_len > 0)) {
			goto err_drop;
		}
	}

	memcpy(dest, src, data_len);

	if (unmap)
		kunmap_atomic(src);

	if (!tbnet_xmit_csum_and_map(net, skb, frames, frame_index + 1))
		goto err_drop;

	for (i = 0; i < frame_index + 1; i++)
		tb_ring_tx(net->tx_ring.ring, &frames[i]->frame);

	if (net->svc->prtcstns & TBNET_MATCH_FRAGS_ID)
		atomic_inc(&net->frame_id);

	net->stats.tx_packets++;
	net->stats.tx_bytes += skb->len;

	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;

err_drop:
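	/* We can re-use the buffers */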
	net->tx_ring.cons -= frame_index;

	dev_kfree_skb_any(skb);
	net->stats.tx_errors++;

	return NETDEV_TX_OK;
}

static void tbnet_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *stats)
{
	struct tbnet *net = netdev_priv(dev);

	stats->tx_packets = net->stats.tx_packets;
	stats->rx_packets = net->stats.rx_packets;
	stats->tx_bytes = net->stats.tx_bytes;
	stats->rx_bytes = net->stats.rx_bytes;
	stats->rx_errors = net->stats.rx_errors + net->stats.rx_length_errors +
		net->stats.rx_over_errors + net->stats.rx_crc_errors +
		net->stats.rx_missed_errors;
	stats->tx_errors = net->stats.tx_errors;
	stats->rx_length_errors = net->stats.rx_length_errors;
	stats->rx_over_errors = net->stats.rx_over_errors;
	stats->rx_crc_errors = net->stats.rx_crc_errors;
	stats->rx_missed_errors = net->stats.rx_missed_errors;
}

static const struct net_device_ops tbnet_netdev_ops = {
	.ndo_open = tbnet_open,
	.ndo_stop = tbnet_stop,
	.ndo_start_xmit = tbnet_start_xmit,
	.ndo_get_stats64 = tbnet_get_stats64,
};

static void tbnet_generate_mac(struct net_device *dev)
{
	const struct tbnet *net = netdev_priv(dev);
	const struct tb_xdomain *xd = net->xd;
	u8 phy_port;
	u32 hash;

	phy_port = tb_phy_port_from_link(TBNET_L0_PORT_NUM(xd->route));

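	/* Unicast and locally administered MAC */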
	dev->dev_addr[0] = phy_port << 4 | 0x02;
	hash = jhash2((u32 *)xd->local_uuid, 4, 0);
	memcpy(dev->dev_addr + 1, &hash, sizeof(hash));
	hash = jhash2((u32 *)xd->local_uuid, 4, hash);
	dev->dev_addr[5] = hash & 0xff;
}

static int tbnet_probe(struct tb_service *svc, const struct tb_service_id *id)
{
	struct tb_xdomain *xd = tb_service_parent(svc);
	struct net_device *dev;
	struct tbnet *net;
	int ret;

	dev = alloc_etherdev(sizeof(*net));
	if (!dev)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &svc->dev);

	net = netdev_priv(dev);
	INIT_DELAYED_WORK(&net->login_work, tbnet_login_work);
	INIT_WORK(&net->connected_work, tbnet_connected_work);
	mutex_init(&net->connection_lock);
	atomic_set(&net->command_id, 0);
	atomic_set(&net->frame_id, 0);
	net->svc = svc;
	net->dev = dev;
	net->xd = xd;

	tbnet_generate_mac(dev);

	strcpy(dev->name, "thunderbolt%d");
	dev->netdev_ops = &tbnet_netdev_ops;

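	/* ThunderboltIP takes advantage of TSO packets: instead of
	 * segmenting them we just split the packet into Thunderbolt
	 * frames (TBNET_MAX_PAYLOAD_SIZE bytes of payload each) and
	 * calculate the checksum over the whole packet here.
	 *
	 * In order to receive large packets from the networking stack,
	 * we need to announce support for most of the offloading
	 * features here.
	 */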
	dev->hw_features = NETIF_F_SG | NETIF_F_ALL_TSO | NETIF_F_GRO |
			   NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	dev->features = dev->hw_features | NETIF_F_HIGHDMA;
	dev->hard_header_len += sizeof(struct thunderbolt_ip_frame_header);

	netif_napi_add(dev, &net->napi, tbnet_poll, NAPI_POLL_WEIGHT);

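	/* MTU range: 68 - 65522 */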
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = TBNET_MAX_MTU - ETH_HLEN;

	net->handler.uuid = &tbnet_svc_uuid;
	net->handler.callback = tbnet_handle_packet;
	net->handler.data = net;
	tb_register_protocol_handler(&net->handler);

	tb_service_set_drvdata(svc, net);

	ret = register_netdev(dev);
	if (ret) {
		tb_unregister_protocol_handler(&net->handler);
		free_netdev(dev);
		return ret;
	}

	return 0;
}

static void tbnet_remove(struct tb_service *svc)
{
	struct tbnet *net = tb_service_get_drvdata(svc);

	unregister_netdev(net->dev);
	tb_unregister_protocol_handler(&net->handler);
	free_netdev(net->dev);
}

static void tbnet_shutdown(struct tb_service *svc)
{
	tbnet_tear_down(tb_service_get_drvdata(svc), true);
}

static int __maybe_unused tbnet_suspend(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tbnet *net = tb_service_get_drvdata(svc);

	stop_login(net);
	if (netif_running(net->dev)) {
		netif_device_detach(net->dev);
		tb_ring_stop(net->rx_ring.ring);
		tb_ring_stop(net->tx_ring.ring);
		tbnet_free_buffers(&net->rx_ring);
		tbnet_free_buffers(&net->tx_ring);
	}

	return 0;
}

static int __maybe_unused tbnet_resume(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tbnet *net = tb_service_get_drvdata(svc);

	netif_carrier_off(net->dev);
	if (netif_running(net->dev)) {
		netif_device_attach(net->dev);
		start_login(net);
	}

	return 0;
}

static const struct dev_pm_ops tbnet_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tbnet_suspend, tbnet_resume)
};

static const struct tb_service_id tbnet_ids[] = {
	{ TB_SERVICE("network", 1) },
	{ },
};
MODULE_DEVICE_TABLE(tbsvc, tbnet_ids);

static struct tb_service_driver tbnet_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "thunderbolt-net",
		.pm = &tbnet_pm_ops,
	},
	.probe = tbnet_probe,
	.remove = tbnet_remove,
	.shutdown = tbnet_shutdown,
	.id_table = tbnet_ids,
};

static int __init tbnet_init(void)
{
	int ret;

	tbnet_dir = tb_property_create_dir(&tbnet_dir_uuid);
	if (!tbnet_dir)
		return -ENOMEM;

	tb_property_add_immediate(tbnet_dir, "prtcid", 1);
	tb_property_add_immediate(tbnet_dir, "prtcvers", 1);
	tb_property_add_immediate(tbnet_dir, "prtcrevs", 1);
	tb_property_add_immediate(tbnet_dir, "prtcstns",
				  TBNET_MATCH_FRAGS_ID);

	ret = tb_register_property_dir("network", tbnet_dir);
	if (ret) {
		tb_property_free_dir(tbnet_dir);
		return ret;
	}

	return tb_register_service_driver(&tbnet_driver);
}
module_init(tbnet_init);

static void __exit tbnet_exit(void)
{
	tb_unregister_service_driver(&tbnet_driver);
	tb_unregister_property_dir("network", tbnet_dir);
	tb_property_free_dir(tbnet_dir);
}
module_exit(tbnet_exit);

MODULE_AUTHOR("Amir Levy <amir.jer.levy@intel.com>");
MODULE_AUTHOR("Michael Jamet <michael.jamet@intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_DESCRIPTION("Thunderbolt network driver");
MODULE_LICENSE("GPL v2");