/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Networking over Thunderbolt cable using Apple ThunderboltIP protocol
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Amir Levy <amir.jer.levy@intel.com>
 *          Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */
#include <linux/atomic.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/sizes.h>
#include <linux/thunderbolt.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

#include <net/ip6_checksum.h>

/* Protocol timeouts in ms */
#define TBNET_LOGIN_DELAY	4500
#define TBNET_LOGIN_TIMEOUT	500
#define TBNET_LOGOUT_TIMEOUT	100

#define TBNET_RING_SIZE		256
#define TBNET_LOCAL_PATH	0xf
#define TBNET_LOGIN_RETRIES	60
#define TBNET_LOGOUT_RETRIES	5
#define TBNET_MATCH_FRAGS_ID	BIT(1)
#define TBNET_MAX_MTU		SZ_64K
#define TBNET_FRAME_SIZE	SZ_4K
#define TBNET_MAX_PAYLOAD_SIZE	\
	(TBNET_FRAME_SIZE - sizeof(struct thunderbolt_ip_frame_header))
/* Rx packets need to hold space for skb_shared_info */
#define TBNET_RX_MAX_SIZE	\
	(TBNET_FRAME_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define TBNET_RX_PAGE_ORDER	get_order(TBNET_RX_MAX_SIZE)
#define TBNET_RX_PAGE_SIZE	(PAGE_SIZE << TBNET_RX_PAGE_ORDER)

#define TBNET_L0_PORT_NUM(route) ((route) & GENMASK(5, 0))

/**
 * struct thunderbolt_ip_frame_header - Header for each Thunderbolt frame
 * @frame_size: size of the data with the frame
 * @frame_index: running index on the frames
 * @frame_id: ID of the frame to match frames to specific packet
 * @frame_count: how many frames assembles a full packet
 *
 * Each data frame passed to the high-speed DMA ring has this header. If
 * the XDomain network directory announces that %TBNET_MATCH_FRAGS_ID is
 * supported then @frame_id is filled, otherwise it stays %0.
 */
struct thunderbolt_ip_frame_header {
	u32 frame_size;
	u16 frame_index;
	u16 frame_id;
	u32 frame_count;
};

enum thunderbolt_ip_frame_pdf {
	TBIP_PDF_FRAME_START = 1,
	TBIP_PDF_FRAME_END,
};

enum thunderbolt_ip_type {
	TBIP_LOGIN,
	TBIP_LOGIN_RESPONSE,
	TBIP_LOGOUT,
	TBIP_STATUS,
};

struct thunderbolt_ip_header {
	u32 route_hi;
	u32 route_lo;
	u32 length_sn;
	uuid_t uuid;
	uuid_t initiator_uuid;
	uuid_t target_uuid;
	u32 type;
	u32 command_id;
};

#define TBIP_HDR_LENGTH_MASK	GENMASK(5, 0)
#define TBIP_HDR_SN_MASK	GENMASK(28, 27)
#define TBIP_HDR_SN_SHIFT	27

struct thunderbolt_ip_login {
	struct thunderbolt_ip_header hdr;
	u32 proto_version;
	u32 transmit_path;
	u32 reserved[4];
};

#define TBIP_LOGIN_PROTO_VERSION	1

struct thunderbolt_ip_login_response {
	struct thunderbolt_ip_header hdr;
	u32 status;
	u32 receiver_mac[2];
	u32 receiver_mac_len;
	u32 reserved[4];
};

struct thunderbolt_ip_logout {
	struct thunderbolt_ip_header hdr;
};

struct thunderbolt_ip_status {
	struct thunderbolt_ip_header hdr;
	u32 status;
};

struct tbnet_stats {
	u64 tx_packets;
	u64 rx_packets;
	u64 tx_bytes;
	u64 rx_bytes;
	u64 rx_errors;
	u64 tx_errors;
	u64 rx_length_errors;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_missed_errors;
};

struct tbnet_frame {
	struct net_device *dev;
	struct page *page;
	struct ring_frame frame;
};

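/**
 * struct tbnet_ring - An Rx or Tx ring used by the driver
 * @frames: Frame buffers belonging to this ring
 * @cons: Consumer index
 * @prod: Producer index
 * @ring: Thunderbolt high-speed DMA ring backing the buffers
 */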
struct tbnet_ring {
	struct tbnet_frame frames[TBNET_RING_SIZE];
	unsigned int cons;
	unsigned int prod;
	struct tb_ring *ring;
};

/**
 * struct tbnet - ThunderboltIP network driver private data
 * @svc: XDomain service the driver is bound to
 * @xd: XDomain the service belongs to
 * @handler: ThunderboltIP configuration protocol handler
 * @dev: Networking device
 * @napi: NAPI structure for Rx polling
 * @stats: Network statistics
 * @skb: Network packet that is currently processed on the Rx path
 * @command_id: ID used for the next configuration protocol packet
 * @login_sent: ThunderboltIP login message successfully sent
 * @login_received: ThunderboltIP login message received from the remote
 *		    host
 * @transmit_path: Transmit path requested by the remote end in its login
 *		   packet
 * @connection_lock: Lock serializing access to @login_sent,
 *		     @login_received and @transmit_path
 * @login_retries: Number of login retries currently done
 * @login_work: Worker sending ThunderboltIP login packets
 * @connected_work: Worker that finalizes the connection setup and
 *		    enables the DMA paths for high-speed data transfers
 * @disconnect_work: Worker handling tear down of the connection
 * @rx_hdr: Copy of the currently processed Rx frame header. Used when a
 *	    network packet consists of multiple Thunderbolt frames
 * @rx_ring: Software ring holding Rx frames
 * @frame_id: Frame ID used for the next Tx packet
 *	      (if %TBNET_MATCH_FRAGS_ID is supported by both ends)
 * @tx_ring: Software ring holding Tx frames
 */
struct tbnet {
	const struct tb_service *svc;
	struct tb_xdomain *xd;
	struct tb_protocol_handler handler;
	struct net_device *dev;
	struct napi_struct napi;
	struct tbnet_stats stats;
	struct sk_buff *skb;
	atomic_t command_id;
	bool login_sent;
	bool login_received;
	u32 transmit_path;
	struct mutex connection_lock;
	int login_retries;
	struct delayed_work login_work;
	struct work_struct connected_work;
	struct work_struct disconnect_work;
	struct thunderbolt_ip_frame_header rx_hdr;
	struct tbnet_ring rx_ring;
	atomic_t frame_id;
	struct tbnet_ring tx_ring;
};

/* Network property directory UUID: c66189ca-1cce-4195-bdb8-49592e5f5a4f */
static const uuid_t tbnet_dir_uuid =
	UUID_INIT(0xc66189ca, 0x1cce, 0x4195,
		  0xbd, 0xb8, 0x49, 0x59, 0x2e, 0x5f, 0x5a, 0x4f);

/* ThunderboltIP protocol UUID: 798f589e-3616-8a47-97c6-5664a920c8dd */
static const uuid_t tbnet_svc_uuid =
	UUID_INIT(0x798f589e, 0x3616, 0x8a47,
		  0x97, 0xc6, 0x56, 0x64, 0xa9, 0x20, 0xc8, 0xdd);

static struct tb_property_dir *tbnet_dir;

static void tbnet_fill_header(struct thunderbolt_ip_header *hdr, u64 route,
			      u8 sequence, const uuid_t *initiator_uuid,
			      const uuid_t *target_uuid,
			      enum thunderbolt_ip_type type, size_t size,
			      u32 command_id)
{
	u32 length_sn;

	/* Length does not include route_hi/lo and length_sn fields */
	length_sn = (size - 3 * 4) / 4;
	length_sn |= (sequence << TBIP_HDR_SN_SHIFT) & TBIP_HDR_SN_MASK;

	hdr->route_hi = upper_32_bits(route);
	hdr->route_lo = lower_32_bits(route);
	hdr->length_sn = length_sn;
	uuid_copy(&hdr->uuid, &tbnet_svc_uuid);
	uuid_copy(&hdr->initiator_uuid, initiator_uuid);
	uuid_copy(&hdr->target_uuid, target_uuid);
	hdr->type = type;
	hdr->command_id = command_id;
}

static int tbnet_login_response(struct tbnet *net, u64 route, u8 sequence,
				u32 command_id)
{
	struct thunderbolt_ip_login_response reply;
	struct tb_xdomain *xd = net->xd;

	memset(&reply, 0, sizeof(reply));
	tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid,
			  xd->remote_uuid, TBIP_LOGIN_RESPONSE, sizeof(reply),
			  command_id);
	memcpy(reply.receiver_mac, net->dev->dev_addr, ETH_ALEN);
	reply.receiver_mac_len = ETH_ALEN;

	return tb_xdomain_response(xd, &reply, sizeof(reply),
				   TB_CFG_PKG_XDOMAIN_RESP);
}

static int tbnet_login_request(struct tbnet *net, u8 sequence)
{
	struct thunderbolt_ip_login_response reply;
	struct thunderbolt_ip_login request;
	struct tb_xdomain *xd = net->xd;

	memset(&request, 0, sizeof(request));
	tbnet_fill_header(&request.hdr, xd->route, sequence, xd->local_uuid,
			  xd->remote_uuid, TBIP_LOGIN, sizeof(request),
			  atomic_inc_return(&net->command_id));

	request.proto_version = TBIP_LOGIN_PROTO_VERSION;
	request.transmit_path = TBNET_LOCAL_PATH;

	return tb_xdomain_request(xd, &request, sizeof(request),
				  TB_CFG_PKG_XDOMAIN_RESP, &reply,
				  sizeof(reply), TB_CFG_PKG_XDOMAIN_RESP,
				  TBNET_LOGIN_TIMEOUT);
}

static int tbnet_logout_response(struct tbnet *net, u64 route, u8 sequence,
				 u32 command_id)
{
	struct thunderbolt_ip_status reply;
	struct tb_xdomain *xd = net->xd;

	memset(&reply, 0, sizeof(reply));
	tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid,
			  xd->remote_uuid, TBIP_STATUS, sizeof(reply),
			  atomic_inc_return(&net->command_id));
	return tb_xdomain_response(xd, &reply, sizeof(reply),
				   TB_CFG_PKG_XDOMAIN_RESP);
}

static int tbnet_logout_request(struct tbnet *net)
{
	struct thunderbolt_ip_logout request;
	struct thunderbolt_ip_status reply;
	struct tb_xdomain *xd = net->xd;

	memset(&request, 0, sizeof(request));
	tbnet_fill_header(&request.hdr, xd->route, 0, xd->local_uuid,
			  xd->remote_uuid, TBIP_LOGOUT, sizeof(request),
			  atomic_inc_return(&net->command_id));

	return tb_xdomain_request(xd, &request, sizeof(request),
				  TB_CFG_PKG_XDOMAIN_RESP, &reply,
				  sizeof(reply), TB_CFG_PKG_XDOMAIN_RESP,
				  TBNET_LOGOUT_TIMEOUT);
}

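/* Resets the login state and (re)starts periodic ThunderboltIP login
 * attempts towards the remote host.
 */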
static void start_login(struct tbnet *net)
{
	mutex_lock(&net->connection_lock);
	net->login_sent = false;
	net->login_received = false;
	mutex_unlock(&net->connection_lock);

	queue_delayed_work(system_long_wq, &net->login_work,
			   msecs_to_jiffies(1000));
}

static void stop_login(struct tbnet *net)
{
	cancel_delayed_work_sync(&net->login_work);
	cancel_work_sync(&net->connected_work);
}

static inline unsigned int tbnet_frame_size(const struct tbnet_frame *tf)
{
	return tf->frame.size ? : TBNET_FRAME_SIZE;
}

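/* Unmaps and releases all buffer pages of the given ring and resets the
 * producer/consumer indexes.
 */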
static void tbnet_free_buffers(struct tbnet_ring *ring)
{
	unsigned int i;

	for (i = 0; i < TBNET_RING_SIZE; i++) {
		struct device *dma_dev = tb_ring_dma_device(ring->ring);
		struct tbnet_frame *tf = &ring->frames[i];
		enum dma_data_direction dir;
		unsigned int order;
		size_t size;

		if (!tf->page)
			continue;

		if (ring->ring->is_tx) {
			dir = DMA_TO_DEVICE;
			order = 0;
			size = TBNET_FRAME_SIZE;
		} else {
			dir = DMA_FROM_DEVICE;
			order = TBNET_RX_PAGE_ORDER;
			size = TBNET_RX_PAGE_SIZE;
		}

		if (tf->frame.buffer_phy)
			dma_unmap_page(dma_dev, tf->frame.buffer_phy, size,
				       dir);

		__free_pages(tf->page, order);
		tf->page = NULL;
	}

	ring->cons = 0;
	ring->prod = 0;
}

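/* Stops networking, optionally sends a ThunderboltIP logout, and tears
 * down the DMA rings, buffers and paths.
 */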
static void tbnet_tear_down(struct tbnet *net, bool send_logout)
{
	netif_carrier_off(net->dev);
	netif_stop_queue(net->dev);

	stop_login(net);

	mutex_lock(&net->connection_lock);

	if (net->login_sent && net->login_received) {
		int retries = TBNET_LOGOUT_RETRIES;

		while (send_logout && retries-- > 0) {
			int ret = tbnet_logout_request(net);
			if (ret != -ETIMEDOUT)
				break;
		}

		tb_ring_stop(net->rx_ring.ring);
		tb_ring_stop(net->tx_ring.ring);
		tbnet_free_buffers(&net->rx_ring);
		tbnet_free_buffers(&net->tx_ring);

		if (tb_xdomain_disable_paths(net->xd))
			netdev_warn(net->dev, "failed to disable DMA paths\n");
	}

	net->login_retries = 0;
	net->login_sent = false;
	net->login_received = false;

	mutex_unlock(&net->connection_lock);
}

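/* Handler for incoming ThunderboltIP control packets (login, logout).
 * Returns 1 if the packet was handled, 0 if it was not meant for this
 * driver instance or is of an unknown type.
 */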
static int tbnet_handle_packet(const void *buf, size_t size, void *data)
{
	const struct thunderbolt_ip_login *pkg = buf;
	struct tbnet *net = data;
	u32 command_id;
	int ret = 0;
	u32 sequence;
	u64 route;

	/* Make sure the packet is for us */
	if (size < sizeof(struct thunderbolt_ip_header))
		return 0;
	if (!uuid_equal(&pkg->hdr.initiator_uuid, net->xd->remote_uuid))
		return 0;
	if (!uuid_equal(&pkg->hdr.target_uuid, net->xd->local_uuid))
		return 0;

	route = ((u64)pkg->hdr.route_hi << 32) | pkg->hdr.route_lo;
	route &= ~BIT_ULL(63);
	if (route != net->xd->route)
		return 0;

	sequence = pkg->hdr.length_sn & TBIP_HDR_SN_MASK;
	sequence >>= TBIP_HDR_SN_SHIFT;
	command_id = pkg->hdr.command_id;

	switch (pkg->hdr.type) {
	case TBIP_LOGIN:
		if (!netif_running(net->dev))
			break;

		ret = tbnet_login_response(net, route, sequence,
					   pkg->hdr.command_id);
		if (!ret) {
			mutex_lock(&net->connection_lock);
			net->login_received = true;
			net->transmit_path = pkg->transmit_path;

			/* If the login retries have been exhausted or we
			 * have not sent our own login yet, restart the
			 * login attempts immediately.
			 */
			if (net->login_retries >= TBNET_LOGIN_RETRIES ||
			    !net->login_sent) {
				net->login_retries = 0;
				queue_delayed_work(system_long_wq,
						   &net->login_work, 0);
			}
			mutex_unlock(&net->connection_lock);

			queue_work(system_long_wq, &net->connected_work);
		}
		break;

	case TBIP_LOGOUT:
		ret = tbnet_logout_response(net, route, sequence, command_id);
		if (!ret)
			queue_work(system_long_wq, &net->disconnect_work);
		break;

	default:
		return 0;
	}

	if (ret)
		netdev_warn(net->dev, "failed to send ThunderboltIP response\n");

	return 1;
}

static unsigned int tbnet_available_buffers(const struct tbnet_ring *ring)
{
	return ring->prod - ring->cons;
}

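/* Allocates, DMA maps and queues up to @nbuffers Rx buffer pages to the
 * Rx ring. On failure all buffers of the ring are released.
 */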
static int tbnet_alloc_rx_buffers(struct tbnet *net, unsigned int nbuffers)
{
	struct tbnet_ring *ring = &net->rx_ring;
	int ret;

	while (nbuffers--) {
		struct device *dma_dev = tb_ring_dma_device(ring->ring);
		unsigned int index = ring->prod & (TBNET_RING_SIZE - 1);
		struct tbnet_frame *tf = &ring->frames[index];
		dma_addr_t dma_addr;

		if (tf->page)
			break;

		/* Allocate page (order > 0) so that it can hold maximum
		 * ThunderboltIP frame (4kB) and the additional room for
		 * SKB shared info required by build_skb().
		 */
		tf->page = dev_alloc_pages(TBNET_RX_PAGE_ORDER);
		if (!tf->page) {
			ret = -ENOMEM;
			goto err_free;
		}

		dma_addr = dma_map_page(dma_dev, tf->page, 0,
					TBNET_RX_PAGE_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(dma_dev, dma_addr)) {
			ret = -ENOMEM;
			goto err_free;
		}

		tf->frame.buffer_phy = dma_addr;
		tf->dev = net->dev;

		tb_ring_rx(ring->ring, &tf->frame);

		ring->prod++;
	}

	return 0;

err_free:
	tbnet_free_buffers(ring);
	return ret;
}

static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net)
{
	struct tbnet_ring *ring = &net->tx_ring;
	struct device *dma_dev = tb_ring_dma_device(ring->ring);
	struct tbnet_frame *tf;
	unsigned int index;

	if (!tbnet_available_buffers(ring))
		return NULL;

	index = ring->cons++ & (TBNET_RING_SIZE - 1);

	tf = &ring->frames[index];
	tf->frame.size = 0;

	dma_sync_single_for_cpu(dma_dev, tf->frame.buffer_phy,
				tbnet_frame_size(tf), DMA_TO_DEVICE);

	return tf;
}

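/* Called when a Tx frame has been transmitted (or canceled). Returns the
 * buffer back to the Tx ring and wakes the queue once enough buffers are
 * available again.
 */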
static void tbnet_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
			      bool canceled)
{
	struct tbnet_frame *tf = container_of(frame, typeof(*tf), frame);
	struct tbnet *net = netdev_priv(tf->dev);

	/* Return buffer to the ring */
	net->tx_ring.prod++;

	if (tbnet_available_buffers(&net->tx_ring) >= TBNET_RING_SIZE / 2)
		netif_wake_queue(net->dev);
}

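/* Preallocates and DMA maps one page per Tx ring entry. Tx buffers stay
 * mapped until the connection is torn down and are only synced for the
 * CPU/device around each transmit.
 */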
static int tbnet_alloc_tx_buffers(struct tbnet *net)
{
	struct tbnet_ring *ring = &net->tx_ring;
	struct device *dma_dev = tb_ring_dma_device(ring->ring);
	unsigned int i;

	for (i = 0; i < TBNET_RING_SIZE; i++) {
		struct tbnet_frame *tf = &ring->frames[i];
		dma_addr_t dma_addr;

		tf->page = alloc_page(GFP_KERNEL);
		if (!tf->page) {
			tbnet_free_buffers(ring);
			return -ENOMEM;
		}

		dma_addr = dma_map_page(dma_dev, tf->page, 0, TBNET_FRAME_SIZE,
					DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, dma_addr)) {
			__free_page(tf->page);
			tf->page = NULL;
			tbnet_free_buffers(ring);
			return -ENOMEM;
		}

		tf->dev = net->dev;
		tf->frame.buffer_phy = dma_addr;
		tf->frame.callback = tbnet_tx_callback;
		tf->frame.sof = TBIP_PDF_FRAME_START;
		tf->frame.eof = TBIP_PDF_FRAME_END;
	}

	ring->cons = 0;
	ring->prod = TBNET_RING_SIZE - 1;

	return 0;
}

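/* Runs once both sides have completed the ThunderboltIP login handshake.
 * Enables the DMA paths, starts the rings, allocates buffers and brings
 * the network interface up.
 */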
static void tbnet_connected_work(struct work_struct *work)
{
	struct tbnet *net = container_of(work, typeof(*net), connected_work);
	bool connected;
	int ret;

	if (netif_carrier_ok(net->dev))
		return;

	mutex_lock(&net->connection_lock);
	connected = net->login_sent && net->login_received;
	mutex_unlock(&net->connection_lock);

	if (!connected)
		return;

	/* Both logins successful so enable the high-speed DMA paths and
	 * start the network device queue.
	 */
	ret = tb_xdomain_enable_paths(net->xd, TBNET_LOCAL_PATH,
				      net->rx_ring.ring->hop,
				      net->transmit_path,
				      net->tx_ring.ring->hop);
	if (ret) {
		netdev_err(net->dev, "failed to enable DMA paths\n");
		return;
	}

	tb_ring_start(net->tx_ring.ring);
	tb_ring_start(net->rx_ring.ring);

	ret = tbnet_alloc_rx_buffers(net, TBNET_RING_SIZE);
	if (ret)
		goto err_stop_rings;

	ret = tbnet_alloc_tx_buffers(net);
	if (ret)
		goto err_free_rx_buffers;

	netif_carrier_on(net->dev);
	netif_start_queue(net->dev);
	return;

err_free_rx_buffers:
	tbnet_free_buffers(&net->rx_ring);
err_stop_rings:
	tb_ring_stop(net->rx_ring.ring);
	tb_ring_stop(net->tx_ring.ring);
}

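/* Periodic worker sending ThunderboltIP login requests until the remote
 * host answers or the retry limit is hit.
 */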
static void tbnet_login_work(struct work_struct *work)
{
	struct tbnet *net = container_of(work, typeof(*net), login_work.work);
	unsigned long delay = msecs_to_jiffies(TBNET_LOGIN_DELAY);
	int ret;

	if (netif_carrier_ok(net->dev))
		return;

	ret = tbnet_login_request(net, net->login_retries % 4);
	if (ret) {
		if (net->login_retries++ < TBNET_LOGIN_RETRIES) {
			queue_delayed_work(system_long_wq, &net->login_work,
					   delay);
		} else {
			netdev_info(net->dev, "ThunderboltIP login timed out\n");
		}
	} else {
		net->login_retries = 0;

		mutex_lock(&net->connection_lock);
		net->login_sent = true;
		mutex_unlock(&net->connection_lock);

		queue_work(system_long_wq, &net->connected_work);
	}
}

static void tbnet_disconnect_work(struct work_struct *work)
{
	struct tbnet *net = container_of(work, typeof(*net), disconnect_work);

	tbnet_tear_down(net, false);
}

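/* Validates a received Thunderbolt frame against the ring descriptor
 * flags and the ThunderboltIP frame header, updating the Rx error
 * counters on failure.
 */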
static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf,
			      const struct thunderbolt_ip_frame_header *hdr)
{
	u32 frame_id, frame_count, frame_size, frame_index;
	unsigned int size;

	if (tf->frame.flags & RING_DESC_CRC_ERROR) {
		net->stats.rx_crc_errors++;
		return false;
	} else if (tf->frame.flags & RING_DESC_BUFFER_OVERRUN) {
		net->stats.rx_over_errors++;
		return false;
	}

	/* Should be greater than just the header, i.e. contains data */
	size = tbnet_frame_size(tf);
	if (size <= sizeof(*hdr)) {
		net->stats.rx_length_errors++;
		return false;
	}

	frame_count = le32_to_cpu(hdr->frame_count);
	frame_size = le32_to_cpu(hdr->frame_size);
	frame_index = le16_to_cpu(hdr->frame_index);
	frame_id = le16_to_cpu(hdr->frame_id);

	if ((frame_size > size - sizeof(*hdr)) || !frame_size) {
		net->stats.rx_length_errors++;
		return false;
	}

	/* In case we're in the middle of a packet, validate the frame
	 * header against the first fragment of the packet.
	 */
	if (net->skb && net->rx_hdr.frame_count) {
		/* Frame count must match the one of the first fragment */
		if (frame_count != net->rx_hdr.frame_count) {
			net->stats.rx_length_errors++;
			return false;
		}

		/* Check that the frame index is incremented correctly
		 * and the frame ID is matching.
		 */
		if (frame_index != net->rx_hdr.frame_index + 1 ||
		    frame_id != net->rx_hdr.frame_id) {
			net->stats.rx_missed_errors++;
			return false;
		}

		if (net->skb->len + frame_size > TBNET_MAX_MTU) {
			net->stats.rx_length_errors++;
			return false;
		}

		return true;
	}

	/* Start of packet, validate the frame count */
	if (frame_count == 0 || frame_count > TBNET_RING_SIZE / 4) {
		net->stats.rx_length_errors++;
		return false;
	}
	if (frame_index != 0) {
		net->stats.rx_missed_errors++;
		return false;
	}

	return true;
}

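/* NAPI poll routine: reassembles received Thunderbolt frames into
 * network packets, passes them to GRO and refills the Rx ring.
 */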
static int tbnet_poll(struct napi_struct *napi, int budget)
{
	struct tbnet *net = container_of(napi, struct tbnet, napi);
	unsigned int cleaned_count = tbnet_available_buffers(&net->rx_ring);
	struct device *dma_dev = tb_ring_dma_device(net->rx_ring.ring);
	unsigned int rx_packets = 0;

	while (rx_packets < budget) {
		const struct thunderbolt_ip_frame_header *hdr;
		unsigned int hdr_size = sizeof(*hdr);
		struct sk_buff *skb = NULL;
		struct ring_frame *frame;
		struct tbnet_frame *tf;
		struct page *page;
		bool last = true;
		u32 frame_size;

		/* Return some buffers to hardware. One at a time is too
		 * slow so allocate MAX_SKB_FRAGS buffers at the same
		 * time.
		 */
		if (cleaned_count >= MAX_SKB_FRAGS) {
			tbnet_alloc_rx_buffers(net, cleaned_count);
			cleaned_count = 0;
		}

		frame = tb_ring_poll(net->rx_ring.ring);
		if (!frame)
			break;

		dma_unmap_page(dma_dev, frame->buffer_phy,
			       TBNET_RX_PAGE_SIZE, DMA_FROM_DEVICE);

		tf = container_of(frame, typeof(*tf), frame);

		page = tf->page;
		tf->page = NULL;
		net->rx_ring.cons++;
		cleaned_count++;

		hdr = page_address(page);
		if (!tbnet_check_frame(net, tf, hdr)) {
			__free_pages(page, TBNET_RX_PAGE_ORDER);
			dev_kfree_skb_any(net->skb);
			net->skb = NULL;
			continue;
		}

		frame_size = le32_to_cpu(hdr->frame_size);

		skb = net->skb;
		if (!skb) {
			skb = build_skb(page_address(page),
					TBNET_RX_PAGE_SIZE);
			if (!skb) {
				__free_pages(page, TBNET_RX_PAGE_ORDER);
				net->stats.rx_errors++;
				break;
			}

			skb_reserve(skb, hdr_size);
			skb_put(skb, frame_size);

			net->skb = skb;
		} else {
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					page, hdr_size, frame_size,
					TBNET_RX_PAGE_SIZE - hdr_size);
		}

		net->rx_hdr.frame_size = frame_size;
		net->rx_hdr.frame_count = le32_to_cpu(hdr->frame_count);
		net->rx_hdr.frame_index = le16_to_cpu(hdr->frame_index);
		net->rx_hdr.frame_id = le16_to_cpu(hdr->frame_id);
		last = net->rx_hdr.frame_index == net->rx_hdr.frame_count - 1;

		rx_packets++;
		net->stats.rx_bytes += frame_size;

		if (last) {
			skb->protocol = eth_type_trans(skb, net->dev);
			napi_gro_receive(&net->napi, skb);
			net->skb = NULL;
		}
	}

	net->stats.rx_packets += rx_packets;

	if (cleaned_count)
		tbnet_alloc_rx_buffers(net, cleaned_count);

	if (rx_packets >= budget)
		return budget;

	napi_complete_done(napi, rx_packets);

	/* Re-enable the ring interrupt */
	tb_ring_poll_complete(net->rx_ring.ring);

	return rx_packets;
}

static void tbnet_start_poll(void *data)
{
	struct tbnet *net = data;

	napi_schedule(&net->napi);
}

static int tbnet_open(struct net_device *dev)
{
	struct tbnet *net = netdev_priv(dev);
	struct tb_xdomain *xd = net->xd;
	u16 sof_mask, eof_mask;
	struct tb_ring *ring;

	netif_carrier_off(dev);

	ring = tb_ring_alloc_tx(xd->tb->nhi, -1, TBNET_RING_SIZE,
				RING_FLAG_FRAME);
	if (!ring) {
		netdev_err(dev, "failed to allocate Tx ring\n");
		return -ENOMEM;
	}
	net->tx_ring.ring = ring;

	sof_mask = BIT(TBIP_PDF_FRAME_START);
	eof_mask = BIT(TBIP_PDF_FRAME_END);

	ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE,
				RING_FLAG_FRAME, 0, sof_mask, eof_mask,
				tbnet_start_poll, net);
	if (!ring) {
		netdev_err(dev, "failed to allocate Rx ring\n");
		tb_ring_free(net->tx_ring.ring);
		net->tx_ring.ring = NULL;
		return -ENOMEM;
	}
	net->rx_ring.ring = ring;

	napi_enable(&net->napi);
	start_login(net);

	return 0;
}

static int tbnet_stop(struct net_device *dev)
{
	struct tbnet *net = netdev_priv(dev);

	napi_disable(&net->napi);

	cancel_work_sync(&net->disconnect_work);
	tbnet_tear_down(net, true);

	tb_ring_free(net->rx_ring.ring);
	net->rx_ring.ring = NULL;
	tb_ring_free(net->tx_ring.ring);
	net->tx_ring.ring = NULL;

	return 0;
}

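/* Fills in the frame count of each Tx frame, computes the IPv4/IPv6 and
 * TCP/UDP checksums in software when requested (CHECKSUM_PARTIAL) and
 * finally syncs the frames for DMA. Returns false if the packet cannot
 * be checksummed and should be dropped.
 */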
static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
				    struct tbnet_frame **frames,
				    u32 frame_count)
{
	struct thunderbolt_ip_frame_header *hdr = page_address(frames[0]->page);
	struct device *dma_dev = tb_ring_dma_device(net->tx_ring.ring);
	__wsum wsum = htonl(skb->len - skb_transport_offset(skb));
	unsigned int i, len, offset = skb_transport_offset(skb);
	__be16 protocol = skb->protocol;
	void *data = skb->data;
	void *dest = hdr + 1;
	__sum16 *tucso;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		/* No need to calculate a checksum so we just update the
		 * total frame count and sync the frames for DMA.
		 */
		for (i = 0; i < frame_count; i++) {
			hdr = page_address(frames[i]->page);
			hdr->frame_count = cpu_to_le32(frame_count);
			dma_sync_single_for_device(dma_dev,
				frames[i]->frame.buffer_phy,
				tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
		}

		return true;
	}

	if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, vh;

		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(vh), &vh);
		if (!vhdr)
			return false;

		protocol = vhdr->h_vlan_encapsulated_proto;
	}

	/* The packet headers were copied into the first frame, so locate
	 * the checksum fields inside that frame: ipcso updates the IP
	 * header checksum, tucso updates the TCP/UDP checksum.
	 */
	if (protocol == htons(ETH_P_IP)) {
		__sum16 *ipcso = dest + ((void *)&(ip_hdr(skb)->check) - data);

		*ipcso = 0;
		*ipcso = ip_fast_csum(dest + skb_network_offset(skb),
				      ip_hdr(skb)->ihl);

		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			tucso = dest + ((void *)&(udp_hdr(skb)->check) - data);
		else
			return false;

		*tucso = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					    ip_hdr(skb)->daddr, 0,
					    ip_hdr(skb)->protocol, 0);
	} else if (skb_is_gso_v6(skb)) {
		tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
		*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					  &ipv6_hdr(skb)->daddr, 0,
					  IPPROTO_TCP, 0);
		return false;
	} else if (protocol == htons(ETH_P_IPV6)) {
		tucso = dest + skb_checksum_start_offset(skb) + skb->csum_offset;
		*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					  &ipv6_hdr(skb)->daddr, 0,
					  ipv6_hdr(skb)->nexthdr, 0);
	} else {
		return false;
	}

	/* Fold the payload into the pseudo-header checksum: in the first
	 * frame start from the transport header, in the following frames
	 * checksum the whole frame payload.
	 */
	for (i = 0; i < frame_count; i++) {
		hdr = page_address(frames[i]->page);
		dest = (void *)(hdr + 1) + offset;
		len = le32_to_cpu(hdr->frame_size) - offset;
		wsum = csum_partial(dest, len, wsum);
		hdr->frame_count = cpu_to_le32(frame_count);

		offset = 0;
	}

	*tucso = csum_fold(wsum);

	/* The checksum is complete and the frame memory is not touched
	 * anymore, so the frames can now be synced for the device.
	 */
	for (i = 0; i < frame_count; i++) {
		dma_sync_single_for_device(dma_dev, frames[i]->frame.buffer_phy,
					   tbnet_frame_size(frames[i]),
					   DMA_TO_DEVICE);
	}

	return true;
}

static void *tbnet_kmap_frag(struct sk_buff *skb, unsigned int frag_num,
			     unsigned int *len)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num];

	*len = skb_frag_size(frag);
	return kmap_atomic(skb_frag_page(frag)) + frag->page_offset;
}

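/* Transmit path: splits the skb into ThunderboltIP frames of at most
 * TBNET_MAX_PAYLOAD_SIZE bytes, copies the data into the preallocated
 * Tx buffers and queues them to the Tx ring.
 */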
static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct tbnet *net = netdev_priv(dev);
	struct tbnet_frame *frames[MAX_SKB_FRAGS];
	u16 frame_id = atomic_read(&net->frame_id);
	struct thunderbolt_ip_frame_header *hdr;
	unsigned int len = skb_headlen(skb);
	unsigned int data_len = skb->len;
	unsigned int nframes, i;
	unsigned int frag = 0;
	void *src = skb->data;
	u32 frame_index = 0;
	bool unmap = false;
	void *dest;

	nframes = DIV_ROUND_UP(data_len, TBNET_MAX_PAYLOAD_SIZE);
	if (tbnet_available_buffers(&net->tx_ring) < nframes) {
		netif_stop_queue(net->dev);
		return NETDEV_TX_BUSY;
	}

	frames[frame_index] = tbnet_get_tx_buffer(net);
	if (!frames[frame_index])
		goto err_drop;

	hdr = page_address(frames[frame_index]->page);
	dest = hdr + 1;

	/* Fill complete frames while more than one full frame of payload
	 * remains.
	 */
	while (data_len > TBNET_MAX_PAYLOAD_SIZE) {
		unsigned int size_left = TBNET_MAX_PAYLOAD_SIZE;

		hdr->frame_size = cpu_to_le32(TBNET_MAX_PAYLOAD_SIZE);
		hdr->frame_index = cpu_to_le16(frame_index);
		hdr->frame_id = cpu_to_le16(frame_id);

		do {
			if (len > size_left) {
				/* Current chunk is larger than the space
				 * left in this frame: fill the frame and
				 * continue with the next one.
				 */
				memcpy(dest, src, size_left);
				len -= size_left;
				dest += size_left;
				src += size_left;
				break;
			}

			memcpy(dest, src, len);
			size_left -= len;
			dest += len;

			if (unmap) {
				kunmap_atomic(src);
				unmap = false;
			}

			/* Advance to the next skb fragment */
			if (frag < skb_shinfo(skb)->nr_frags) {
				/* Map and then unmap quickly */
				src = tbnet_kmap_frag(skb, frag++, &len);
				unmap = true;
			} else if (unlikely(size_left > 0)) {
				goto err_drop;
			}
		} while (size_left > 0);

		data_len -= TBNET_MAX_PAYLOAD_SIZE;
		frame_index++;

		frames[frame_index] = tbnet_get_tx_buffer(net);
		if (!frames[frame_index])
			goto err_drop;

		hdr = page_address(frames[frame_index]->page);
		dest = hdr + 1;
	}

	hdr->frame_size = cpu_to_le32(data_len);
	hdr->frame_index = cpu_to_le16(frame_index);
	hdr->frame_id = cpu_to_le16(frame_id);

	frames[frame_index]->frame.size = data_len + sizeof(*hdr);

	/* Copy the remaining data, which is smaller than a full frame */
	while (len < data_len) {
		memcpy(dest, src, len);
		data_len -= len;
		dest += len;

		if (unmap) {
			kunmap_atomic(src);
			unmap = false;
		}

		if (frag < skb_shinfo(skb)->nr_frags) {
			src = tbnet_kmap_frag(skb, frag++, &len);
			unmap = true;
		} else if (unlikely(data_len > 0)) {
			goto err_drop;
		}
	}

	memcpy(dest, src, data_len);

	if (unmap)
		kunmap_atomic(src);

	if (!tbnet_xmit_csum_and_map(net, skb, frames, frame_index + 1))
		goto err_drop;

	for (i = 0; i < frame_index + 1; i++)
		tb_ring_tx(net->tx_ring.ring, &frames[i]->frame);

	if (net->svc->prtcstns & TBNET_MATCH_FRAGS_ID)
		atomic_inc(&net->frame_id);

	net->stats.tx_packets++;
	net->stats.tx_bytes += skb->len;

	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;

err_drop:
	/* We can re-use the buffers */
	net->tx_ring.cons -= frame_index;

	dev_kfree_skb_any(skb);
	net->stats.tx_errors++;

	return NETDEV_TX_OK;
}

static void tbnet_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *stats)
{
	struct tbnet *net = netdev_priv(dev);

	stats->tx_packets = net->stats.tx_packets;
	stats->rx_packets = net->stats.rx_packets;
	stats->tx_bytes = net->stats.tx_bytes;
	stats->rx_bytes = net->stats.rx_bytes;
	stats->rx_errors = net->stats.rx_errors + net->stats.rx_length_errors +
		net->stats.rx_over_errors + net->stats.rx_crc_errors +
		net->stats.rx_missed_errors;
	stats->tx_errors = net->stats.tx_errors;
	stats->rx_length_errors = net->stats.rx_length_errors;
	stats->rx_over_errors = net->stats.rx_over_errors;
	stats->rx_crc_errors = net->stats.rx_crc_errors;
	stats->rx_missed_errors = net->stats.rx_missed_errors;
}

static const struct net_device_ops tbnet_netdev_ops = {
	.ndo_open = tbnet_open,
	.ndo_stop = tbnet_stop,
	.ndo_start_xmit = tbnet_start_xmit,
	.ndo_get_stats64 = tbnet_get_stats64,
};

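/* Generates a stable, locally administered unicast MAC address from the
 * Thunderbolt physical port number and the local XDomain UUID.
 */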
static void tbnet_generate_mac(struct net_device *dev)
{
	const struct tbnet *net = netdev_priv(dev);
	const struct tb_xdomain *xd = net->xd;
	u8 phy_port;
	u32 hash;

	phy_port = tb_phy_port_from_link(TBNET_L0_PORT_NUM(xd->route));

	/* Unicast and locally administered MAC */
	dev->dev_addr[0] = phy_port << 4 | 0x02;
	hash = jhash2((u32 *)xd->local_uuid, 4, 0);
	memcpy(dev->dev_addr + 1, &hash, sizeof(hash));
	hash = jhash2((u32 *)xd->local_uuid, 4, hash);
	dev->dev_addr[5] = hash & 0xff;
}

static int tbnet_probe(struct tb_service *svc, const struct tb_service_id *id)
{
	struct tb_xdomain *xd = tb_service_parent(svc);
	struct net_device *dev;
	struct tbnet *net;
	int ret;

	dev = alloc_etherdev(sizeof(*net));
	if (!dev)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &svc->dev);

	net = netdev_priv(dev);
	INIT_DELAYED_WORK(&net->login_work, tbnet_login_work);
	INIT_WORK(&net->connected_work, tbnet_connected_work);
	INIT_WORK(&net->disconnect_work, tbnet_disconnect_work);
	mutex_init(&net->connection_lock);
	atomic_set(&net->command_id, 0);
	atomic_set(&net->frame_id, 0);
	net->svc = svc;
	net->dev = dev;
	net->xd = xd;

	tbnet_generate_mac(dev);

	strcpy(dev->name, "thunderbolt%d");
	dev->netdev_ops = &tbnet_netdev_ops;

	/* ThunderboltIP can take advantage of TSO packets. Instead of
	 * segmenting them we just split each packet into ThunderboltIP
	 * frames (maximum payload size of a frame is 4084 bytes) and
	 * calculate the checksum over the whole packet in software (see
	 * tbnet_xmit_csum_and_map()).
	 *
	 * The receive path does the opposite: the frames belonging to
	 * one packet are reassembled into a single skb before it is
	 * handed to the networking stack.
	 */
	dev->hw_features = NETIF_F_SG | NETIF_F_ALL_TSO | NETIF_F_GRO |
			   NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	dev->features = dev->hw_features | NETIF_F_HIGHDMA;
	dev->hard_header_len += sizeof(struct thunderbolt_ip_frame_header);

	netif_napi_add(dev, &net->napi, tbnet_poll, NAPI_POLL_WEIGHT);

	/* MTU range: 68 - 65522 */
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = TBNET_MAX_MTU - ETH_HLEN;

	net->handler.uuid = &tbnet_svc_uuid;
	net->handler.callback = tbnet_handle_packet;
	net->handler.data = net;
	tb_register_protocol_handler(&net->handler);

	tb_service_set_drvdata(svc, net);

	ret = register_netdev(dev);
	if (ret) {
		tb_unregister_protocol_handler(&net->handler);
		free_netdev(dev);
		return ret;
	}

	return 0;
}

static void tbnet_remove(struct tb_service *svc)
{
	struct tbnet *net = tb_service_get_drvdata(svc);

	unregister_netdev(net->dev);
	tb_unregister_protocol_handler(&net->handler);
	free_netdev(net->dev);
}

static void tbnet_shutdown(struct tb_service *svc)
{
	tbnet_tear_down(tb_service_get_drvdata(svc), true);
}

static int __maybe_unused tbnet_suspend(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tbnet *net = tb_service_get_drvdata(svc);

	stop_login(net);
	if (netif_running(net->dev)) {
		netif_device_detach(net->dev);
		tbnet_tear_down(net, true);
	}

	return 0;
}

static int __maybe_unused tbnet_resume(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tbnet *net = tb_service_get_drvdata(svc);

	netif_carrier_off(net->dev);
	if (netif_running(net->dev)) {
		netif_device_attach(net->dev);
		start_login(net);
	}

	return 0;
}

static const struct dev_pm_ops tbnet_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tbnet_suspend, tbnet_resume)
};

static const struct tb_service_id tbnet_ids[] = {
	{ TB_SERVICE("network", 1) },
	{ },
};
MODULE_DEVICE_TABLE(tbsvc, tbnet_ids);

static struct tb_service_driver tbnet_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "thunderbolt-net",
		.pm = &tbnet_pm_ops,
	},
	.probe = tbnet_probe,
	.remove = tbnet_remove,
	.shutdown = tbnet_shutdown,
	.id_table = tbnet_ids,
};

static int __init tbnet_init(void)
{
	int ret;

	tbnet_dir = tb_property_create_dir(&tbnet_dir_uuid);
	if (!tbnet_dir)
		return -ENOMEM;

	tb_property_add_immediate(tbnet_dir, "prtcid", 1);
	tb_property_add_immediate(tbnet_dir, "prtcvers", 1);
	tb_property_add_immediate(tbnet_dir, "prtcrevs", 1);
	tb_property_add_immediate(tbnet_dir, "prtcstns",
				  TBNET_MATCH_FRAGS_ID);

	ret = tb_register_property_dir("network", tbnet_dir);
	if (ret) {
		tb_property_free_dir(tbnet_dir);
		return ret;
	}

	return tb_register_service_driver(&tbnet_driver);
}
module_init(tbnet_init);

static void __exit tbnet_exit(void)
{
	tb_unregister_service_driver(&tbnet_driver);
	tb_unregister_property_dir("network", tbnet_dir);
	tb_property_free_dir(tbnet_dir);
}
module_exit(tbnet_exit);

MODULE_AUTHOR("Amir Levy <amir.jer.levy@intel.com>");
MODULE_AUTHOR("Michael Jamet <michael.jamet@intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_DESCRIPTION("Thunderbolt network driver");
MODULE_LICENSE("GPL v2");