// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>

#include "ionic.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"

static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info, void *cb_arg);

static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	DEBUG_STATS_TXQ_POST(q_to_qcq(q), q->head->desc, ring_dbell);

	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}

static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	ionic_q_post(q, ring_dbell, cb_func, cb_arg);

	DEBUG_STATS_RX_BUFF_CNT(q_to_qcq(q));
}

static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
{
	return netdev_get_tx_queue(q->lif->netdev, q->index);
}

static struct sk_buff *ionic_rx_skb_alloc(struct ionic_queue *q,
					  unsigned int len, bool frags)
{
	struct ionic_lif *lif = q->lif;
	struct ionic_rx_stats *stats;
	struct net_device *netdev;
	struct sk_buff *skb;

	netdev = lif->netdev;
	stats = q_to_rx_stats(q);

	if (frags)
		skb = napi_get_frags(&q_to_qcq(q)->napi);
	else
		skb = netdev_alloc_skb_ip_align(netdev, len);

	if (unlikely(!skb)) {
		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
				     netdev->name, q->name);
		stats->alloc_err++;
		return NULL;
	}

	return skb;
}

static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
				      struct ionic_desc_info *desc_info,
				      struct ionic_cq_info *cq_info)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct device *dev = q->lif->ionic->dev;
	struct ionic_page_info *page_info;
	struct sk_buff *skb;
	unsigned int i;
	u16 frag_len;
	u16 len;

	page_info = &desc_info->pages[0];
	len = le16_to_cpu(comp->len);

	prefetch(page_address(page_info->page) + NET_IP_ALIGN);

	skb = ionic_rx_skb_alloc(q, len, true);
	if (unlikely(!skb))
		return NULL;

	i = comp->num_sg_elems + 1;
	do {
		if (unlikely(!page_info->page)) {
			struct napi_struct *napi = &q_to_qcq(q)->napi;

			napi->skb = NULL;
			dev_kfree_skb(skb);
			return NULL;
		}

		frag_len = min(len, (u16)PAGE_SIZE);
		len -= frag_len;

		dma_unmap_page(dev, dma_unmap_addr(page_info, dma_addr),
			       PAGE_SIZE, DMA_FROM_DEVICE);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				page_info->page, 0, frag_len, PAGE_SIZE);
		page_info->page = NULL;
		page_info++;
		i--;
	} while (i > 0);

	return skb;
}

static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
					  struct ionic_desc_info *desc_info,
					  struct ionic_cq_info *cq_info)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct device *dev = q->lif->ionic->dev;
	struct ionic_page_info *page_info;
	struct sk_buff *skb;
	u16 len;

	page_info = &desc_info->pages[0];
	len = le16_to_cpu(comp->len);

	skb = ionic_rx_skb_alloc(q, len, false);
	if (unlikely(!skb))
		return NULL;

	if (unlikely(!page_info->page)) {
		dev_kfree_skb(skb);
		return NULL;
	}

	dma_sync_single_for_cpu(dev, dma_unmap_addr(page_info, dma_addr),
				len, DMA_FROM_DEVICE);
	skb_copy_to_linear_data(skb, page_address(page_info->page), len);
	dma_sync_single_for_device(dev, dma_unmap_addr(page_info, dma_addr),
				   len, DMA_FROM_DEVICE);

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, q->lif->netdev);

	return skb;
}

static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info, void *cb_arg)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct ionic_qcq *qcq = q_to_qcq(q);
	struct ionic_rx_stats *stats;
	struct net_device *netdev;
	struct sk_buff *skb;

	stats = q_to_rx_stats(q);
	netdev = q->lif->netdev;

	if (comp->status) {
		stats->dropped++;
		return;
	}

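	/* no packet processing while the queue is resetting */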
	if (unlikely(test_bit(IONIC_LIF_QUEUE_RESET, q->lif->state))) {
		stats->dropped++;
		return;
	}

	stats->pkts++;
	stats->bytes += le16_to_cpu(comp->len);

	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		skb = ionic_rx_copybreak(q, desc_info, cq_info);
	else
		skb = ionic_rx_frags(q, desc_info, cq_info);

	if (unlikely(!skb)) {
		stats->dropped++;
		return;
	}

	skb_record_rx_queue(skb, q->index);

	if (likely(netdev->features & NETIF_F_RXHASH)) {
		switch (comp->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
		case IONIC_PKT_TYPE_IPV4:
		case IONIC_PKT_TYPE_IPV6:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L3);
			break;
		case IONIC_PKT_TYPE_IPV4_TCP:
		case IONIC_PKT_TYPE_IPV6_TCP:
		case IONIC_PKT_TYPE_IPV4_UDP:
		case IONIC_PKT_TYPE_IPV6_UDP:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L4);
			break;
		}
	}

	if (likely(netdev->features & NETIF_F_RXCSUM)) {
		if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = (__wsum)le16_to_cpu(comp->csum);
			stats->csum_complete++;
		}
	} else {
		stats->csum_none++;
	}

	if (unlikely((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)))
		stats->csum_error++;

	if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
		if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(comp->vlan_tci));
	}

	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		napi_gro_receive(&qcq->napi, skb);
	else
		napi_gro_frags(&qcq->napi);
}

static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;

	if (!color_match(comp->pkt_type_color, cq->done_color))
		return false;

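	/* check for empty queue */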
	if (q->tail->index == q->head->index)
		return false;

	desc_info = q->tail;
	if (desc_info->index != le16_to_cpu(comp->comp_index))
		return false;

	q->tail = desc_info->next;

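	/* clean the related q entry, only one per qc completion */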
	ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg);

	desc_info->cb = NULL;
	desc_info->cb_arg = NULL;

	return true;
}

static u32 ionic_rx_walk_cq(struct ionic_cq *rxcq, u32 limit)
{
	u32 work_done = 0;

	while (ionic_rx_service(rxcq, rxcq->tail)) {
		if (rxcq->tail->last)
			rxcq->done_color = !rxcq->done_color;
		rxcq->tail = rxcq->tail->next;
		DEBUG_STATS_CQE_CNT(rxcq);

		if (++work_done >= limit)
			break;
	}

	return work_done;
}

void ionic_rx_flush(struct ionic_cq *cq)
{
	struct ionic_dev *idev = &cq->lif->ionic->idev;
	u32 work_done;

	work_done = ionic_rx_walk_cq(cq, cq->num_descs);

	if (work_done)
		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
}

static struct page *ionic_rx_page_alloc(struct ionic_queue *q,
					dma_addr_t *dma_addr)
{
	struct ionic_lif *lif = q->lif;
	struct ionic_rx_stats *stats;
	struct net_device *netdev;
	struct device *dev;
	struct page *page;

	netdev = lif->netdev;
	dev = lif->ionic->dev;
	stats = q_to_rx_stats(q);
	page = alloc_page(GFP_ATOMIC);
	if (unlikely(!page)) {
		net_err_ratelimited("%s: Page alloc failed on %s!\n",
				    netdev->name, q->name);
		stats->alloc_err++;
		return NULL;
	}

	*dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, *dma_addr))) {
		__free_page(page);
		net_err_ratelimited("%s: DMA single map failed on %s!\n",
				    netdev->name, q->name);
		stats->dma_map_err++;
		return NULL;
	}

	return page;
}

static void ionic_rx_page_free(struct ionic_queue *q, struct page *page,
			       dma_addr_t dma_addr)
{
	struct ionic_lif *lif = q->lif;
	struct net_device *netdev;
	struct device *dev;

	netdev = lif->netdev;
	dev = lif->ionic->dev;

	if (unlikely(!page)) {
		net_err_ratelimited("%s: Trying to free unallocated buffer on %s!\n",
				    netdev->name, q->name);
		return;
	}

	dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);

	__free_page(page);
}

#define IONIC_RX_RING_DOORBELL_STRIDE		((1 << 5) - 1)
#define IONIC_RX_RING_HEAD_BUF_SZ		2048

void ionic_rx_fill(struct ionic_queue *q)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_desc_info *desc_info;
	struct ionic_page_info *page_info;
	struct ionic_rxq_sg_desc *sg_desc;
	struct ionic_rxq_sg_elem *sg_elem;
	struct ionic_rxq_desc *desc;
	unsigned int remain_len;
	unsigned int seg_len;
	unsigned int nfrags;
	bool ring_doorbell;
	unsigned int i, j;
	unsigned int len;

	len = netdev->mtu + ETH_HLEN;
	nfrags = round_up(len, PAGE_SIZE) / PAGE_SIZE;

	for (i = ionic_q_space_avail(q); i; i--) {
		remain_len = len;
		desc_info = q->head;
		desc = desc_info->desc;
		sg_desc = desc_info->sg_desc;
		page_info = &desc_info->pages[0];

		if (page_info->page) {
			ring_doorbell = ((q->head->index + 1) &
					IONIC_RX_RING_DOORBELL_STRIDE) == 0;
			ionic_rxq_post(q, ring_doorbell, ionic_rx_clean, NULL);
			continue;
		}

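		/* fill main descriptor - pages[0] */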
		desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
					      IONIC_RXQ_DESC_OPCODE_SIMPLE;
		desc_info->npages = nfrags;
		page_info->page = ionic_rx_page_alloc(q, &page_info->dma_addr);
		if (unlikely(!page_info->page)) {
			desc->addr = 0;
			desc->len = 0;
			return;
		}
		desc->addr = cpu_to_le64(page_info->dma_addr);
		seg_len = min_t(unsigned int, PAGE_SIZE, len);
		desc->len = cpu_to_le16(seg_len);
		remain_len -= seg_len;
		page_info++;

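		/* fill sg descriptors - pages[1..n] */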
		for (j = 0; j < nfrags - 1; j++) {
			if (page_info->page)
				continue;

			sg_elem = &sg_desc->elems[j];
			page_info->page = ionic_rx_page_alloc(q, &page_info->dma_addr);
			if (unlikely(!page_info->page)) {
				sg_elem->addr = 0;
				sg_elem->len = 0;
				return;
			}
			sg_elem->addr = cpu_to_le64(page_info->dma_addr);
			seg_len = min_t(unsigned int, PAGE_SIZE, remain_len);
			sg_elem->len = cpu_to_le16(seg_len);
			remain_len -= seg_len;
			page_info++;
		}

		ring_doorbell = ((q->head->index + 1) &
				IONIC_RX_RING_DOORBELL_STRIDE) == 0;
		ionic_rxq_post(q, ring_doorbell, ionic_rx_clean, NULL);
	}
}

static void ionic_rx_fill_cb(void *arg)
{
	ionic_rx_fill(arg);
}

void ionic_rx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *cur;
	struct ionic_rxq_desc *desc;
	unsigned int i;

	for (cur = q->tail; cur != q->head; cur = cur->next) {
		desc = cur->desc;
		desc->addr = 0;
		desc->len = 0;

		for (i = 0; i < cur->npages; i++) {
			if (likely(cur->pages[i].page)) {
				ionic_rx_page_free(q, cur->pages[i].page,
						   cur->pages[i].dma_addr);
				cur->pages[i].page = NULL;
				cur->pages[i].dma_addr = 0;
			}
		}

		cur->cb_arg = NULL;
	}
}

int ionic_rx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *rxcq = napi_to_cq(napi);
	unsigned int qi = rxcq->bound_q->index;
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	struct ionic_cq *txcq;
	u32 work_done = 0;
	u32 flags = 0;

	lif = rxcq->bound_q->lif;
	idev = &lif->ionic->idev;
	txcq = &lif->txqcqs[qi].qcq->cq;

	ionic_tx_flush(txcq);

	work_done = ionic_rx_walk_cq(rxcq, budget);

	if (work_done)
		ionic_rx_fill_cb(rxcq->bound_q);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		flags |= IONIC_INTR_CRED_UNMASK;
		DEBUG_STATS_INTR_REARM(rxcq->bound_intr);
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
				   work_done, flags);
	}

	DEBUG_STATS_NAPI_POLL(qcq, work_done);

	return work_done;
}

static dma_addr_t ionic_tx_map_single(struct ionic_queue *q, void *data, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA single map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
		return 0;
	}
	return dma_addr;
}

static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q, const skb_frag_t *frag,
				    size_t offset, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;

	dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
	}
	return dma_addr;
}

static void ionic_tx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info, void *cb_arg)
{
	struct ionic_txq_sg_desc *sg_desc = desc_info->sg_desc;
	struct ionic_txq_sg_elem *elem = sg_desc->elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_txq_desc *desc = desc_info->desc;
	struct device *dev = q->lif->ionic->dev;
	u8 opcode, flags, nsge;
	u16 queue_index;
	unsigned int i;
	u64 addr;

	decode_txq_desc_cmd(le64_to_cpu(desc->cmd),
			    &opcode, &flags, &nsge, &addr);

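	/* use unmap_single only if either this is not TSO,
	 * or this is the first descriptor of a TSO
	 */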
	if (opcode != IONIC_TXQ_DESC_OPCODE_TSO ||
	    flags & IONIC_TXQ_DESC_FLAG_TSO_SOT)
		dma_unmap_single(dev, (dma_addr_t)addr,
				 le16_to_cpu(desc->len), DMA_TO_DEVICE);
	else
		dma_unmap_page(dev, (dma_addr_t)addr,
			       le16_to_cpu(desc->len), DMA_TO_DEVICE);

	for (i = 0; i < nsge; i++, elem++)
		dma_unmap_page(dev, (dma_addr_t)le64_to_cpu(elem->addr),
			       le16_to_cpu(elem->len), DMA_TO_DEVICE);

	if (cb_arg) {
		struct sk_buff *skb = cb_arg;
		u32 len = skb->len;

		queue_index = skb_get_queue_mapping(skb);
		if (unlikely(__netif_subqueue_stopped(q->lif->netdev,
						      queue_index))) {
			netif_wake_subqueue(q->lif->netdev, queue_index);
			q->wake++;
		}
		dev_kfree_skb_any(skb);
		stats->clean++;
		netdev_tx_completed_queue(q_to_ndq(q), 1, len);
	}
}

void ionic_tx_flush(struct ionic_cq *cq)
{
	struct ionic_txq_comp *comp = cq->tail->cq_desc;
	struct ionic_dev *idev = &cq->lif->ionic->idev;
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;
	unsigned int work_done = 0;

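	/* walk the completed cq entries */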
	while (work_done < cq->num_descs &&
	       color_match(comp->color, cq->done_color)) {
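		/* clean the related q entries; there could be
		 * several q entries completed for each cq completion
		 */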
		do {
			desc_info = q->tail;
			q->tail = desc_info->next;
			ionic_tx_clean(q, desc_info, cq->tail,
				       desc_info->cb_arg);
			desc_info->cb = NULL;
			desc_info->cb_arg = NULL;
		} while (desc_info->index != le16_to_cpu(comp->comp_index));

		if (cq->tail->last)
			cq->done_color = !cq->done_color;

		cq->tail = cq->tail->next;
		comp = cq->tail->cq_desc;
		DEBUG_STATS_CQE_CNT(cq);

		work_done++;
	}

	if (work_done)
		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
				   work_done, 0);
}

static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		inner_ip_hdr(skb)->check = 0;
		inner_tcp_hdr(skb)->check =
			~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
					   inner_ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		inner_tcp_hdr(skb)->check =
			~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
					 &inner_ipv6_hdr(skb)->daddr,
					 0, IPPROTO_TCP, 0);
	}

	return 0;
}

static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_hdr(skb)->check =
			~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					 &ipv6_hdr(skb)->daddr,
					 0, IPPROTO_TCP, 0);
	}

	return 0;
}

static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
			      struct sk_buff *skb,
			      dma_addr_t addr, u8 nsge, u16 len,
			      unsigned int hdrlen, unsigned int mss,
			      bool outer_csum,
			      u16 vlan_tci, bool has_vlan,
			      bool start, bool done)
{
	u8 flags = 0;
	u64 cmd;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= outer_csum ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, flags, nsge, addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(len);
	desc->vlan_tci = cpu_to_le16(vlan_tci);
	desc->hdr_len = cpu_to_le16(hdrlen);
	desc->mss = cpu_to_le16(mss);

	if (done) {
		skb_tx_timestamp(skb);
		netdev_tx_sent_queue(q_to_ndq(q), skb->len);
		ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);
	} else {
		ionic_txq_post(q, false, ionic_tx_clean, NULL);
	}
}

static struct ionic_txq_desc *ionic_tx_tso_next(struct ionic_queue *q,
						struct ionic_txq_sg_elem **elem)
{
	struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc;
	struct ionic_txq_desc *desc = q->head->desc;

	*elem = sg_desc->elems;
	return desc;
}

static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_desc_info *abort = q->head;
	struct device *dev = q->lif->ionic->dev;
	struct ionic_desc_info *rewind = abort;
	struct ionic_txq_sg_elem *elem;
	struct ionic_txq_desc *desc;
	unsigned int frag_left = 0;
	unsigned int offset = 0;
	unsigned int len_left;
	dma_addr_t desc_addr;
	unsigned int hdrlen;
	unsigned int nfrags;
	unsigned int seglen;
	u64 total_bytes = 0;
	u64 total_pkts = 0;
	unsigned int left;
	unsigned int len;
	unsigned int mss;
	skb_frag_t *frag;
	bool start, done;
	bool outer_csum;
	bool has_vlan;
	u16 desc_len;
	u8 desc_nsge;
	u16 vlan_tci;
	bool encap;
	int err;

	mss = skb_shinfo(skb)->gso_size;
	nfrags = skb_shinfo(skb)->nr_frags;
	len_left = skb->len - skb_headlen(skb);
	outer_csum = (skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM) ||
		     (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
	has_vlan = !!skb_vlan_tag_present(skb);
	vlan_tci = skb_vlan_tag_get(skb);
	encap = skb->encapsulation;

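	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero.  HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */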
	if (encap)
		err = ionic_tx_tcp_inner_pseudo_csum(skb);
	else
		err = ionic_tx_tcp_pseudo_csum(skb);
	if (err)
		return err;

	if (encap)
		hdrlen = skb_inner_transport_header(skb) - skb->data +
			 inner_tcp_hdrlen(skb);
	else
		hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);

	seglen = hdrlen + mss;
	left = skb_headlen(skb);

	desc = ionic_tx_tso_next(q, &elem);
	start = true;

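	/* Chop skb->data up into desc segments */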
	while (left > 0) {
		len = min(seglen, left);
		frag_left = seglen - len;
		desc_addr = ionic_tx_map_single(q, skb->data + offset, len);
		if (dma_mapping_error(dev, desc_addr))
			goto err_out_abort;
		desc_len = len;
		desc_nsge = 0;
		left -= len;
		offset += len;
		if (nfrags > 0 && frag_left > 0)
			continue;
		done = (nfrags == 0 && left == 0);
		ionic_tx_tso_post(q, desc, skb,
				  desc_addr, desc_nsge, desc_len,
				  hdrlen, mss,
				  outer_csum,
				  vlan_tci, has_vlan,
				  start, done);
		total_pkts++;
		total_bytes += start ? len : len + hdrlen;
		desc = ionic_tx_tso_next(q, &elem);
		start = false;
		seglen = mss;
	}

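	/* Chop skb frags into desc segments */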
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		offset = 0;
		left = skb_frag_size(frag);
		len_left -= left;
		nfrags--;
		stats->frags++;

		while (left > 0) {
			if (frag_left > 0) {
				len = min(frag_left, left);
				frag_left -= len;
				elem->addr =
				    cpu_to_le64(ionic_tx_map_frag(q, frag,
								  offset, len));
				if (dma_mapping_error(dev, elem->addr))
					goto err_out_abort;
				elem->len = cpu_to_le16(len);
				elem++;
				desc_nsge++;
				left -= len;
				offset += len;
				if (nfrags > 0 && frag_left > 0)
					continue;
				done = (nfrags == 0 && left == 0);
				ionic_tx_tso_post(q, desc, skb, desc_addr,
						  desc_nsge, desc_len,
						  hdrlen, mss, outer_csum,
						  vlan_tci, has_vlan,
						  start, done);
				total_pkts++;
				total_bytes += start ? len : len + hdrlen;
				desc = ionic_tx_tso_next(q, &elem);
				start = false;
			} else {
				len = min(mss, left);
				frag_left = mss - len;
				desc_addr = ionic_tx_map_frag(q, frag,
							      offset, len);
				if (dma_mapping_error(dev, desc_addr))
					goto err_out_abort;
				desc_len = len;
				desc_nsge = 0;
				left -= len;
				offset += len;
				if (nfrags > 0 && frag_left > 0)
					continue;
				done = (nfrags == 0 && left == 0);
				ionic_tx_tso_post(q, desc, skb, desc_addr,
						  desc_nsge, desc_len,
						  hdrlen, mss, outer_csum,
						  vlan_tci, has_vlan,
						  start, done);
				total_pkts++;
				total_bytes += start ? len : len + hdrlen;
				desc = ionic_tx_tso_next(q, &elem);
				start = false;
			}
		}
	}

	stats->pkts += total_pkts;
	stats->bytes += total_bytes;
	stats->tso++;

	return 0;

err_out_abort:
	while (rewind->desc != q->head->desc) {
		ionic_tx_clean(q, rewind, NULL, NULL);
		rewind = rewind->next;
	}
	q->head = abort;

	return -ENOMEM;
}

static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_txq_desc *desc = q->head->desc;
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL,
				  flags, skb_shinfo(skb)->nr_frags, dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(skb_headlen(skb));
	desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
	desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
	desc->csum_offset = cpu_to_le16(skb->csum_offset);

	if (skb->csum_not_inet)
		stats->crc32_csum++;
	else
		stats->csum++;

	return 0;
}

static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_txq_desc *desc = q->head->desc;
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
				  flags, skb_shinfo(skb)->nr_frags, dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(skb_headlen(skb));
	desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));

	stats->no_csum++;

	return 0;
}

static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc;
	unsigned int len_left = skb->len - skb_headlen(skb);
	struct ionic_txq_sg_elem *elem = sg_desc->elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;
	skb_frag_t *frag;
	u16 len;

	for (frag = skb_shinfo(skb)->frags; len_left; frag++, elem++) {
		len = skb_frag_size(frag);
		elem->len = cpu_to_le16(len);
		dma_addr = ionic_tx_map_frag(q, frag, 0, len);
		if (dma_mapping_error(dev, dma_addr))
			return -ENOMEM;
		elem->addr = cpu_to_le64(dma_addr);
		len_left -= len;
		stats->frags++;
	}

	return 0;
}

static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	int err;

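	/* set up the initial descriptor */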
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		err = ionic_tx_calc_csum(q, skb);
	else
		err = ionic_tx_calc_no_csum(q, skb);
	if (err)
		return err;

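	/* add frags */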
	err = ionic_tx_skb_frags(q, skb);
	if (err)
		return err;

	skb_tx_timestamp(skb);
	stats->pkts++;
	stats->bytes += skb->len;

	netdev_tx_sent_queue(q_to_ndq(q), skb->len);
	ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);

	return 0;
}

static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	int err;

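	/* If TSO, need roundup(skb->len/mss) descs */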
	if (skb_is_gso(skb))
		return (skb->len / skb_shinfo(skb)->gso_size) + 1;

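	/* If non-TSO, just need 1 desc and nr_frags sg elems */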
	if (skb_shinfo(skb)->nr_frags <= IONIC_TX_MAX_SG_ELEMS)
		return 1;

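	/* Too many frags, linearize */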
	err = skb_linearize(skb);
	if (err)
		return err;

	stats->linearize++;

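	/* Need 1 desc and zero sg elems */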
	return 1;
}

static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
{
	int stopped = 0;

	if (unlikely(!ionic_q_has_space(q, ndescs))) {
		netif_stop_subqueue(q->lif->netdev, q->index);
		q->stop++;
		stopped = 1;

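		/* Might race with ionic_tx_clean, check again */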
		smp_rmb();
		if (ionic_q_has_space(q, ndescs)) {
			netif_wake_subqueue(q->lif->netdev, q->index);
			stopped = 0;
		}
	}

	return stopped;
}

netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue *q;
	int ndescs;
	int err;

	if (unlikely(!test_bit(IONIC_LIF_UP, lif->state))) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(!lif_to_txqcq(lif, queue_index)))
		queue_index = 0;
	q = lif_to_txq(lif, queue_index);

	ndescs = ionic_tx_descs_needed(q, skb);
	if (ndescs < 0)
		goto err_out_drop;

	if (unlikely(ionic_maybe_stop_tx(q, ndescs)))
		return NETDEV_TX_BUSY;

	if (skb_is_gso(skb))
		err = ionic_tx_tso(q, skb);
	else
		err = ionic_tx(q, skb);

	if (err)
		goto err_out_drop;

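	/* Stop the queue if there aren't descriptors for the next packet.
	 * Since our SG lists per descriptor take care of most of the possible
	 * fragmentation, we don't need to have many descriptors available.
	 */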
	ionic_maybe_stop_tx(q, 4);

	return NETDEV_TX_OK;

err_out_drop:
	q->stop++;
	q->drop++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}