#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>

#include "ionic.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"

static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	DEBUG_STATS_TXQ_POST(q, ring_dbell);

	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}

static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	ionic_q_post(q, ring_dbell, cb_func, cb_arg);

	DEBUG_STATS_RX_BUFF_CNT(q);
}

static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
{
	return netdev_get_tx_queue(q->lif->netdev, q->index);
}

static void ionic_rx_buf_reset(struct ionic_buf_info *buf_info)
{
	buf_info->page = NULL;
	buf_info->page_offset = 0;
	buf_info->dma_addr = 0;
}

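/* Allocate a full page for an Rx buffer and map it for DMA; on any
 * failure the buf_info is left reset and an errno is returned so the
 * caller can stop filling the ring.
 */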
static int ionic_rx_page_alloc(struct ionic_queue *q,
			       struct ionic_buf_info *buf_info)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_rx_stats *stats;
	struct device *dev;

	dev = q->dev;
	stats = q_to_rx_stats(q);

	if (unlikely(!buf_info)) {
		net_err_ratelimited("%s: %s invalid buf_info in alloc\n",
				    netdev->name, q->name);
		return -EINVAL;
	}

	buf_info->page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
	if (unlikely(!buf_info->page)) {
		net_err_ratelimited("%s: %s page alloc failed\n",
				    netdev->name, q->name);
		stats->alloc_err++;
		return -ENOMEM;
	}
	buf_info->page_offset = 0;

	buf_info->dma_addr = dma_map_page(dev, buf_info->page, buf_info->page_offset,
					  IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, buf_info->dma_addr))) {
		__free_pages(buf_info->page, 0);
		ionic_rx_buf_reset(buf_info);
		net_err_ratelimited("%s: %s dma map failed\n",
				    netdev->name, q->name);
		stats->dma_map_err++;
		return -EIO;
	}

	return 0;
}

static void ionic_rx_page_free(struct ionic_queue *q,
			       struct ionic_buf_info *buf_info)
{
	struct net_device *netdev = q->lif->netdev;
	struct device *dev = q->dev;

	if (unlikely(!buf_info)) {
		net_err_ratelimited("%s: %s invalid buf_info in free\n",
				    netdev->name, q->name);
		return;
	}

	if (!buf_info->page)
		return;

	dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
	__free_pages(buf_info->page, 0);
	ionic_rx_buf_reset(buf_info);
}

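/* Try to re-use the current Rx page by bumping page_offset past the
 * bytes just consumed; returns false when the page should instead be
 * unmapped and replaced (pfmemalloc page, remote NUMA node, or no
 * room left in the page).
 */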
static bool ionic_rx_buf_recycle(struct ionic_queue *q,
				 struct ionic_buf_info *buf_info, u32 used)
{
	u32 size;

	/* don't re-use buffers allocated from the pfmemalloc reserves */
	if (page_is_pfmemalloc(buf_info->page))
		return false;

	/* don't re-use buffers from non-local numa nodes */
	if (page_to_nid(buf_info->page) != numa_mem_id())
		return false;

	size = ALIGN(used, IONIC_PAGE_SPLIT_SZ);
	buf_info->page_offset += size;
	if (buf_info->page_offset >= IONIC_PAGE_SIZE)
		return false;

	get_page(buf_info->page);

	return true;
}

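/* Build a frag-based skb from the completed Rx descriptor: each buffer
 * is attached as a page fragment, and buffers that can't be recycled
 * are unmapped so the fill path will allocate fresh pages.
 */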
static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
				      struct ionic_desc_info *desc_info,
				      struct ionic_rxq_comp *comp)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_buf_info *buf_info;
	struct ionic_rx_stats *stats;
	struct device *dev = q->dev;
	struct sk_buff *skb;
	unsigned int i;
	u16 frag_len;
	u16 len;

	stats = q_to_rx_stats(q);

	buf_info = &desc_info->bufs[0];
	len = le16_to_cpu(comp->len);

	prefetch(buf_info->page);

	skb = napi_get_frags(&q_to_qcq(q)->napi);
	if (unlikely(!skb)) {
		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
				     netdev->name, q->name);
		stats->alloc_err++;
		return NULL;
	}

	i = comp->num_sg_elems + 1;
	do {
		if (unlikely(!buf_info->page)) {
			dev_kfree_skb(skb);
			return NULL;
		}

		frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);
		len -= frag_len;

		dma_sync_single_for_cpu(dev,
					buf_info->dma_addr + buf_info->page_offset,
					frag_len, DMA_FROM_DEVICE);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				buf_info->page, buf_info->page_offset, frag_len,
				IONIC_PAGE_SIZE);

		if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) {
			dma_unmap_page(dev, buf_info->dma_addr,
				       IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
			ionic_rx_buf_reset(buf_info);
		}

		buf_info++;

		i--;
	} while (i > 0);

	return skb;
}

static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
					  struct ionic_desc_info *desc_info,
					  struct ionic_rxq_comp *comp)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_buf_info *buf_info;
	struct ionic_rx_stats *stats;
	struct device *dev = q->dev;
	struct sk_buff *skb;
	u16 len;

	stats = q_to_rx_stats(q);

	buf_info = &desc_info->bufs[0];
	len = le16_to_cpu(comp->len);

	skb = napi_alloc_skb(&q_to_qcq(q)->napi, len);
	if (unlikely(!skb)) {
		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
				     netdev->name, q->name);
		stats->alloc_err++;
		return NULL;
	}

	if (unlikely(!buf_info->page)) {
		dev_kfree_skb(skb);
		return NULL;
	}

	dma_sync_single_for_cpu(dev, buf_info->dma_addr + buf_info->page_offset,
				len, DMA_FROM_DEVICE);
	skb_copy_to_linear_data(skb, page_address(buf_info->page) + buf_info->page_offset, len);
	dma_sync_single_for_device(dev, buf_info->dma_addr + buf_info->page_offset,
				   len, DMA_FROM_DEVICE);

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, q->lif->netdev);

	return skb;
}

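/* Process one completed Rx descriptor: build the skb (copybreak copy for
 * small packets, page frags otherwise), then fill in RSS hash, checksum,
 * VLAN and hardware timestamp metadata before handing it to GRO.
 */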
static void ionic_rx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_qcq *qcq = q_to_qcq(q);
	struct ionic_rx_stats *stats;
	struct ionic_rxq_comp *comp;
	struct sk_buff *skb;

	comp = cq_info->cq_desc + qcq->cq.desc_size - sizeof(*comp);

	stats = q_to_rx_stats(q);

	if (comp->status) {
		stats->dropped++;
		return;
	}

	stats->pkts++;
	stats->bytes += le16_to_cpu(comp->len);

	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		skb = ionic_rx_copybreak(q, desc_info, comp);
	else
		skb = ionic_rx_frags(q, desc_info, comp);

	if (unlikely(!skb)) {
		stats->dropped++;
		return;
	}

	skb_record_rx_queue(skb, q->index);

	if (likely(netdev->features & NETIF_F_RXHASH)) {
		switch (comp->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
		case IONIC_PKT_TYPE_IPV4:
		case IONIC_PKT_TYPE_IPV6:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L3);
			break;
		case IONIC_PKT_TYPE_IPV4_TCP:
		case IONIC_PKT_TYPE_IPV6_TCP:
		case IONIC_PKT_TYPE_IPV4_UDP:
		case IONIC_PKT_TYPE_IPV6_UDP:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L4);
			break;
		}
	}

	if (likely(netdev->features & NETIF_F_RXCSUM) &&
	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC)) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
		stats->csum_complete++;
	} else {
		stats->csum_none++;
	}

	if (unlikely((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)))
		stats->csum_error++;

	if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)) {
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       le16_to_cpu(comp->vlan_tci));
		stats->vlan_stripped++;
	}

	if (unlikely(q->features & IONIC_RXQ_F_HWSTAMP)) {
		__le64 *cq_desc_hwstamp;
		u64 hwstamp;

		cq_desc_hwstamp =
			cq_info->cq_desc +
			qcq->cq.desc_size -
			sizeof(struct ionic_rxq_comp) -
			IONIC_HWSTAMP_CQ_NEGOFFSET;

		hwstamp = le64_to_cpu(*cq_desc_hwstamp);

		if (hwstamp != IONIC_HWSTAMP_INVALID) {
			skb_hwtstamps(skb)->hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);
			stats->hwstamp_valid++;
		} else {
			stats->hwstamp_invalid++;
		}
	}

	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		napi_gro_receive(&qcq->napi, skb);
	else
		napi_gro_frags(&qcq->napi);
}

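/* Service one Rx completion: bail out if the completion color doesn't
 * match or the completion doesn't correspond to the queue tail, otherwise
 * clean the matching descriptor and advance the tail index.
 */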
bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;
	struct ionic_rxq_comp *comp;

	comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);

	if (!color_match(comp->pkt_type_color, cq->done_color))
		return false;

	/* check for empty queue */
	if (q->tail_idx == q->head_idx)
		return false;

	if (q->tail_idx != le16_to_cpu(comp->comp_index))
		return false;

	desc_info = &q->info[q->tail_idx];
	q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);

	/* clean the related q entry, only one per cq completion */
	ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg);

	desc_info->cb = NULL;
	desc_info->cb_arg = NULL;

	return true;
}

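/* Refill the Rx ring: for each free descriptor, (re)use or allocate page
 * buffers large enough for an MTU-sized frame, chaining extra buffers
 * through the SG elements, then ring the doorbell once at the end.
 */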
void ionic_rx_fill(struct ionic_queue *q)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_desc_info *desc_info;
	struct ionic_rxq_sg_desc *sg_desc;
	struct ionic_rxq_sg_elem *sg_elem;
	struct ionic_buf_info *buf_info;
	struct ionic_rxq_desc *desc;
	unsigned int remain_len;
	unsigned int frag_len;
	unsigned int nfrags;
	unsigned int i, j;
	unsigned int len;

	len = netdev->mtu + ETH_HLEN + VLAN_HLEN;

	for (i = ionic_q_space_avail(q); i; i--) {
		nfrags = 0;
		remain_len = len;
		desc_info = &q->info[q->head_idx];
		desc = desc_info->desc;
		buf_info = &desc_info->bufs[0];

		if (!buf_info->page) {
			if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
				desc->addr = 0;
				desc->len = 0;
				return;
			}
		}

		/* fill main descriptor - bufs[0] */
		desc->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
		frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);
		desc->len = cpu_to_le16(frag_len);
		remain_len -= frag_len;
		buf_info++;
		nfrags++;

		/* fill sg descriptors - bufs[1..n] */
		sg_desc = desc_info->sg_desc;
		for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++) {
			sg_elem = &sg_desc->elems[j];
			if (!buf_info->page) {
				if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
					sg_elem->addr = 0;
					sg_elem->len = 0;
					return;
				}
			}

			sg_elem->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
			frag_len = min_t(u16, remain_len, IONIC_PAGE_SIZE - buf_info->page_offset);
			sg_elem->len = cpu_to_le16(frag_len);
			remain_len -= frag_len;
			buf_info++;
			nfrags++;
		}

		/* zero the first unused sg element, if any */
		if (j < q->max_sg_elems) {
			sg_elem = &sg_desc->elems[j];
			memset(sg_elem, 0, sizeof(*sg_elem));
		}

		desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
					      IONIC_RXQ_DESC_OPCODE_SIMPLE;
		desc_info->nbufs = nfrags;

		ionic_rxq_post(q, false, ionic_rx_clean, NULL);
	}

	ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
			 q->dbval | q->head_idx);
}

void ionic_rx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *desc_info;
	struct ionic_buf_info *buf_info;
	unsigned int i, j;

	for (i = 0; i < q->num_descs; i++) {
		desc_info = &q->info[i];
		for (j = 0; j < IONIC_RX_MAX_SG_ELEMS + 1; j++) {
			buf_info = &desc_info->bufs[j];
			if (buf_info->page)
				ionic_rx_page_free(q, buf_info);
		}

		desc_info->nbufs = 0;
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	}

	q->head_idx = 0;
	q->tail_idx = 0;
}

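/* Feed the per-queue packet and byte counts into the dynamic interrupt
 * moderation (net_dim) engine; napi_mode selects whether Tx, Rx, or
 * combined statistics are sampled.
 */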
static void ionic_dim_update(struct ionic_qcq *qcq, int napi_mode)
{
	struct dim_sample dim_sample;
	struct ionic_lif *lif;
	unsigned int qi;
	u64 pkts, bytes;

	if (!qcq->intr.dim_coal_hw)
		return;

	lif = qcq->q.lif;
	qi = qcq->cq.bound_q->index;

	switch (napi_mode) {
	case IONIC_LIF_F_TX_DIM_INTR:
		pkts = lif->txqstats[qi].pkts;
		bytes = lif->txqstats[qi].bytes;
		break;
	case IONIC_LIF_F_RX_DIM_INTR:
		pkts = lif->rxqstats[qi].pkts;
		bytes = lif->rxqstats[qi].bytes;
		break;
	default:
		pkts = lif->txqstats[qi].pkts + lif->rxqstats[qi].pkts;
		bytes = lif->txqstats[qi].bytes + lif->rxqstats[qi].bytes;
		break;
	}

	dim_update_sample(qcq->cq.bound_intr->rearm_count,
			  pkts, bytes, &dim_sample);

	net_dim(&qcq->dim, dim_sample);
}

int ionic_tx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *cq = napi_to_cq(napi);
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	u32 work_done = 0;
	u32 flags = 0;

	lif = cq->bound_q->lif;
	idev = &lif->ionic->idev;

	work_done = ionic_cq_service(cq, budget,
				     ionic_tx_service, NULL, NULL);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		ionic_dim_update(qcq, IONIC_LIF_F_TX_DIM_INTR);
		flags |= IONIC_INTR_CRED_UNMASK;
		cq->bound_intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl,
				   cq->bound_intr->index,
				   work_done, flags);
	}

	DEBUG_STATS_NAPI_POLL(qcq, work_done);

	return work_done;
}

int ionic_rx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *cq = napi_to_cq(napi);
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	u16 rx_fill_threshold;
	u32 work_done = 0;
	u32 flags = 0;

	lif = cq->bound_q->lif;
	idev = &lif->ionic->idev;

	work_done = ionic_cq_service(cq, budget,
				     ionic_rx_service, NULL, NULL);

	rx_fill_threshold = min_t(u16, IONIC_RX_FILL_THRESHOLD,
				  cq->num_descs / IONIC_RX_FILL_DIV);
	if (work_done && ionic_q_space_avail(cq->bound_q) >= rx_fill_threshold)
		ionic_rx_fill(cq->bound_q);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR);
		flags |= IONIC_INTR_CRED_UNMASK;
		cq->bound_intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl,
				   cq->bound_intr->index,
				   work_done, flags);
	}

	DEBUG_STATS_NAPI_POLL(qcq, work_done);

	return work_done;
}

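/* NAPI handler for the combined case where one napi context services both
 * the Tx and Rx completion queues of a queue pair: Tx is serviced first
 * with a fixed budget, then Rx with the NAPI budget, the Rx ring is
 * refilled as needed, and the shared interrupt is rearmed.
 */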
int ionic_txrx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *rxcq = napi_to_cq(napi);
	unsigned int qi = rxcq->bound_q->index;
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	struct ionic_cq *txcq;
	u16 rx_fill_threshold;
	u32 rx_work_done = 0;
	u32 tx_work_done = 0;
	u32 flags = 0;

	lif = rxcq->bound_q->lif;
	idev = &lif->ionic->idev;
	txcq = &lif->txqcqs[qi]->cq;

	tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT,
					ionic_tx_service, NULL, NULL);

	rx_work_done = ionic_cq_service(rxcq, budget,
					ionic_rx_service, NULL, NULL);

	rx_fill_threshold = min_t(u16, IONIC_RX_FILL_THRESHOLD,
				  rxcq->num_descs / IONIC_RX_FILL_DIV);
	if (rx_work_done && ionic_q_space_avail(rxcq->bound_q) >= rx_fill_threshold)
		ionic_rx_fill(rxcq->bound_q);

	if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
		ionic_dim_update(qcq, 0);
		flags |= IONIC_INTR_CRED_UNMASK;
		rxcq->bound_intr->rearm_count++;
	}

	if (rx_work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
				   tx_work_done + rx_work_done, flags);
	}

	DEBUG_STATS_NAPI_POLL(qcq, rx_work_done);
	DEBUG_STATS_NAPI_POLL(qcq, tx_work_done);

	return rx_work_done;
}

static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
				      void *data, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->dev;
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA single map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
		return 0;
	}
	return dma_addr;
}

static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
				    const skb_frag_t *frag,
				    size_t offset, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->dev;
	dma_addr_t dma_addr;

	dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
	}
	return dma_addr;
}

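/* DMA-map the skb head and all of its page fragments into the
 * descriptor's buf_info array; on failure, unwind any mappings already
 * made so nothing is leaked.
 */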
static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
			    struct ionic_desc_info *desc_info)
{
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->dev;
	dma_addr_t dma_addr;
	unsigned int nfrags;
	skb_frag_t *frag;
	int frag_idx;

	dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
	if (dma_mapping_error(dev, dma_addr)) {
		stats->dma_map_err++;
		return -EIO;
	}
	buf_info->dma_addr = dma_addr;
	buf_info->len = skb_headlen(skb);
	buf_info++;

	frag = skb_shinfo(skb)->frags;
	nfrags = skb_shinfo(skb)->nr_frags;
	for (frag_idx = 0; frag_idx < nfrags; frag_idx++, frag++) {
		dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
		if (dma_mapping_error(dev, dma_addr)) {
			stats->dma_map_err++;
			goto dma_fail;
		}
		buf_info->dma_addr = dma_addr;
		buf_info->len = skb_frag_size(frag);
		buf_info++;
	}

	desc_info->nbufs = 1 + nfrags;

	return 0;

dma_fail:
	/* unwind the frag mappings, then the head mapping */
	while (frag_idx > 0) {
		frag_idx--;
		buf_info--;
		dma_unmap_page(dev, buf_info->dma_addr,
			       buf_info->len, DMA_TO_DEVICE);
	}
	dma_unmap_single(dev, buf_info->dma_addr, buf_info->len, DMA_TO_DEVICE);
	return -EIO;
}

static void ionic_tx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_qcq *qcq = q_to_qcq(q);
	struct sk_buff *skb = cb_arg;
	struct device *dev = q->dev;
	unsigned int i;
	u16 qi;

	if (desc_info->nbufs) {
		dma_unmap_single(dev, (dma_addr_t)buf_info->dma_addr,
				 buf_info->len, DMA_TO_DEVICE);
		buf_info++;
		for (i = 1; i < desc_info->nbufs; i++, buf_info++)
			dma_unmap_page(dev, (dma_addr_t)buf_info->dma_addr,
				       buf_info->len, DMA_TO_DEVICE);
	}

	if (!skb)
		return;

	qi = skb_get_queue_mapping(skb);

	if (unlikely(q->features & IONIC_TXQ_F_HWSTAMP)) {
		if (cq_info) {
			struct skb_shared_hwtstamps hwts = {};
			__le64 *cq_desc_hwstamp;
			u64 hwstamp;

			cq_desc_hwstamp =
				cq_info->cq_desc +
				qcq->cq.desc_size -
				sizeof(struct ionic_txq_comp) -
				IONIC_HWSTAMP_CQ_NEGOFFSET;

			hwstamp = le64_to_cpu(*cq_desc_hwstamp);

			if (hwstamp != IONIC_HWSTAMP_INVALID) {
				hwts.hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);

				skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
				skb_tstamp_tx(skb, &hwts);

				stats->hwstamp_valid++;
			} else {
				stats->hwstamp_invalid++;
			}
		}

	} else if (unlikely(__netif_subqueue_stopped(q->lif->netdev, qi))) {
		netif_wake_subqueue(q->lif->netdev, qi);
		q->wake++;
	}

	desc_info->bytes = skb->len;
	stats->clean++;

	dev_consume_skb_any(skb);
}

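/* Service one Tx completion: a single completion can cover several
 * descriptors, so clean queue entries up to and including comp_index,
 * then report the completed packets and bytes to BQL (except for the
 * timestamping queue).
 */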
bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;
	struct ionic_txq_comp *comp;
	int bytes = 0;
	int pkts = 0;
	u16 index;

	comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);

	if (!color_match(comp->color, cq->done_color))
		return false;

	/* clean the related q entries, there could be
	 * several q entries completed for each cq completion
	 */
	do {
		desc_info = &q->info[q->tail_idx];
		desc_info->bytes = 0;
		index = q->tail_idx;
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
		ionic_tx_clean(q, desc_info, cq_info, desc_info->cb_arg);
		if (desc_info->cb_arg) {
			pkts++;
			bytes += desc_info->bytes;
		}
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	} while (index != le16_to_cpu(comp->comp_index));

	if (pkts && bytes && !unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
		netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);

	return true;
}

void ionic_tx_flush(struct ionic_cq *cq)
{
	struct ionic_dev *idev = &cq->lif->ionic->idev;
	u32 work_done;

	work_done = ionic_cq_service(cq, cq->num_descs,
				     ionic_tx_service, NULL, NULL);
	if (work_done)
		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
}

void ionic_tx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *desc_info;
	int bytes = 0;
	int pkts = 0;

	/* clean up any outstanding, never-completed descriptors */
	while (q->head_idx != q->tail_idx) {
		desc_info = &q->info[q->tail_idx];
		desc_info->bytes = 0;
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
		ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg);
		if (desc_info->cb_arg) {
			pkts++;
			bytes += desc_info->bytes;
		}
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	}

	if (pkts && bytes && !unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
		netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);
}

static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		inner_ip_hdr(skb)->check = 0;
		inner_tcp_hdr(skb)->check =
			~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
					   inner_ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		inner_tcp_hdr(skb)->check =
			~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
					 &inner_ipv6_hdr(skb)->daddr,
					 0, IPPROTO_TCP, 0);
	}

	return 0;
}

static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_v6_gso_csum_prep(skb);
	}

	return 0;
}

static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
			      struct sk_buff *skb,
			      dma_addr_t addr, u8 nsge, u16 len,
			      unsigned int hdrlen, unsigned int mss,
			      bool outer_csum,
			      u16 vlan_tci, bool has_vlan,
			      bool start, bool done)
{
	u8 flags = 0;
	u64 cmd;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= outer_csum ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, flags, nsge, addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(len);
	desc->vlan_tci = cpu_to_le16(vlan_tci);
	desc->hdr_len = cpu_to_le16(hdrlen);
	desc->mss = cpu_to_le16(mss);

	if (start) {
		skb_tx_timestamp(skb);
		if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
			netdev_tx_sent_queue(q_to_ndq(q), skb->len);
		ionic_txq_post(q, false, ionic_tx_clean, skb);
	} else {
		ionic_txq_post(q, done, NULL, NULL);
	}
}

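/* Build the chain of TSO descriptors for a GSO skb: the mapped buffers
 * are walked in segments (headers plus mss for the first, mss thereafter),
 * each segment getting one descriptor plus SG elements, with the SOT/EOT
 * flags marking the first and last descriptors of the chain.
 */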
static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_desc_info *desc_info;
	struct ionic_buf_info *buf_info;
	struct ionic_txq_sg_elem *elem;
	struct ionic_txq_desc *desc;
	unsigned int chunk_len;
	unsigned int frag_rem;
	unsigned int tso_rem;
	unsigned int seg_rem;
	dma_addr_t desc_addr;
	dma_addr_t frag_addr;
	unsigned int hdrlen;
	unsigned int len;
	unsigned int mss;
	bool start, done;
	bool outer_csum;
	bool has_vlan;
	u16 desc_len;
	u8 desc_nsge;
	u16 vlan_tci;
	bool encap;
	int err;

	desc_info = &q->info[q->head_idx];
	buf_info = desc_info->bufs;

	if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
		return -EIO;

	len = skb->len;
	mss = skb_shinfo(skb)->gso_size;
	outer_csum = (skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM) ||
		     (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
	has_vlan = !!skb_vlan_tag_present(skb);
	vlan_tci = skb_vlan_tag_get(skb);
	encap = skb->encapsulation;

	/* Preload the inner-most TCP csum field with the IP pseudo hdr
	 * calculated with IP length set to zero; the length is added in
	 * per segment when the hardware splits the TSO.
	 */
	if (encap)
		err = ionic_tx_tcp_inner_pseudo_csum(skb);
	else
		err = ionic_tx_tcp_pseudo_csum(skb);
	if (err)
		return err;

	if (encap)
		hdrlen = skb_inner_transport_header(skb) - skb->data +
			 inner_tcp_hdrlen(skb);
	else
		hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);

	tso_rem = len;
	seg_rem = min(tso_rem, hdrlen + mss);

	frag_addr = 0;
	frag_rem = 0;

	start = true;

	while (tso_rem > 0) {
		desc = NULL;
		elem = NULL;
		desc_addr = 0;
		desc_len = 0;
		desc_nsge = 0;

		while (seg_rem > 0) {
			/* if the current fragment is exhausted, move to the next one */
			if (frag_rem == 0) {
				frag_addr = buf_info->dma_addr;
				frag_rem = buf_info->len;
				buf_info++;
			}
			chunk_len = min(frag_rem, seg_rem);
			if (!desc) {
				/* fill main descriptor */
				desc = desc_info->txq_desc;
				elem = desc_info->txq_sg_desc->elems;
				desc_addr = frag_addr;
				desc_len = chunk_len;
			} else {
				/* fill sg descriptor */
				elem->addr = cpu_to_le64(frag_addr);
				elem->len = cpu_to_le16(chunk_len);
				elem++;
				desc_nsge++;
			}
			frag_addr += chunk_len;
			frag_rem -= chunk_len;
			tso_rem -= chunk_len;
			seg_rem -= chunk_len;
		}
		seg_rem = min(tso_rem, mss);
		done = (tso_rem == 0);

		/* post the descriptor for this segment */
		ionic_tx_tso_post(q, desc, skb,
				  desc_addr, desc_nsge, desc_len,
				  hdrlen, mss, outer_csum, vlan_tci, has_vlan,
				  start, done);
		start = false;

		/* the mappings are tracked by the first descriptor,
		 * so clear nbufs on the rest
		 */
		desc_info = &q->info[q->head_idx];
		desc_info->nbufs = 0;
	}

	stats->pkts += DIV_ROUND_UP(len - hdrlen, mss);
	stats->bytes += len;
	stats->tso++;
	stats->tso_bytes = len;

	return 0;
}

static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
			      struct ionic_desc_info *desc_info)
{
	struct ionic_txq_desc *desc = desc_info->txq_desc;
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL,
				  flags, skb_shinfo(skb)->nr_frags,
				  buf_info->dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(buf_info->len);
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	} else {
		desc->vlan_tci = 0;
	}
	desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
	desc->csum_offset = cpu_to_le16(skb->csum_offset);

	if (skb_csum_is_sctp(skb))
		stats->crc32_csum++;
	else
		stats->csum++;

	return 0;
}

static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
				 struct ionic_desc_info *desc_info)
{
	struct ionic_txq_desc *desc = desc_info->txq_desc;
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
				  flags, skb_shinfo(skb)->nr_frags,
				  buf_info->dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(buf_info->len);
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	} else {
		desc->vlan_tci = 0;
	}
	desc->csum_start = 0;
	desc->csum_offset = 0;

	stats->csum_none++;

	return 0;
}

static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
			      struct ionic_desc_info *desc_info)
{
	struct ionic_txq_sg_desc *sg_desc = desc_info->txq_sg_desc;
	struct ionic_buf_info *buf_info = &desc_info->bufs[1];
	struct ionic_txq_sg_elem *elem = sg_desc->elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	unsigned int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, buf_info++, elem++) {
		elem->addr = cpu_to_le64(buf_info->dma_addr);
		elem->len = cpu_to_le16(buf_info->len);
	}

	stats->frags += skb_shinfo(skb)->nr_frags;

	return 0;
}

static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_desc_info *desc_info = &q->info[q->head_idx];
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	int err;

	if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
		return -EIO;

	/* set up the initial descriptor */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		err = ionic_tx_calc_csum(q, skb, desc_info);
	else
		err = ionic_tx_calc_no_csum(q, skb, desc_info);
	if (err)
		return err;

	/* add the frags as sg elements */
	err = ionic_tx_skb_frags(q, skb, desc_info);
	if (err)
		return err;

	skb_tx_timestamp(skb);
	stats->pkts++;
	stats->bytes += skb->len;

	if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
		netdev_tx_sent_queue(q_to_ndq(q), skb->len);
	ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);

	return 0;
}

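/* Estimate how many Tx descriptors an skb will need: one per GSO segment
 * for TSO, otherwise one; if the skb carries more fragments than the SG
 * descriptor can hold, linearize it first.
 */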
static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	int ndescs;
	int err;

	/* Each descriptor covers at most mss bytes, so one per gso_seg */
	if (skb_is_gso(skb))
		ndescs = skb_shinfo(skb)->gso_segs;
	else
		ndescs = 1;

	/* enough sg elements to cover all the frags, no need to linearize */
	if (skb_shinfo(skb)->nr_frags <= q->max_sg_elems)
		return ndescs;

	/* too many frags, so linearize the skb */
	err = skb_linearize(skb);
	if (err)
		return err;

	stats->linearize++;

	return ndescs;
}

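/* Stop the subqueue when there isn't room for ndescs more descriptors,
 * re-checking after the stop in case the completion path freed space in
 * the meantime; returns nonzero if the queue is left stopped.
 */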
static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
{
	int stopped = 0;

	if (unlikely(!ionic_q_has_space(q, ndescs))) {
		netif_stop_subqueue(q->lif->netdev, q->index);
		q->stop++;
		stopped = 1;

		/* Might race with ionic_tx_clean, check again */
		smp_rmb();
		if (ionic_q_has_space(q, ndescs)) {
			netif_wake_subqueue(q->lif->netdev, q->index);
			stopped = 0;
		}
	}

	return stopped;
}

static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb,
					    struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue *q = &lif->hwstamp_txq->q;
	int err, ndescs;

	/* The timestamping frames go to a separate tx queue, so there is
	 * no subqueue stop/start here; if the packet can't be posted to
	 * the timestamping queue right away, it is simply dropped.
	 */
	ndescs = ionic_tx_descs_needed(q, skb);
	if (unlikely(ndescs < 0))
		goto err_out_drop;

	if (unlikely(!ionic_q_has_space(q, ndescs)))
		goto err_out_drop;

	skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP;
	if (skb_is_gso(skb))
		err = ionic_tx_tso(q, skb);
	else
		err = ionic_tx(q, skb);

	if (err)
		goto err_out_drop;

	return NETDEV_TX_OK;

err_out_drop:
	q->drop++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

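/* .ndo_start_xmit entry point: route hardware-timestamp requests to the
 * dedicated timestamping queue when available, otherwise pick the Tx
 * queue from the skb's queue mapping, make sure there is ring space
 * (stopping the subqueue if not), and hand the skb to the TSO or
 * regular transmit path.
 */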
netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue *q;
	int ndescs;
	int err;

	if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state))) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		if (lif->hwstamp_txq && lif->phc->ts_config_tx_mode)
			return ionic_start_hwstamp_xmit(skb, netdev);

	if (unlikely(queue_index >= lif->nxqs))
		queue_index = 0;
	q = &lif->txqcqs[queue_index]->q;

	ndescs = ionic_tx_descs_needed(q, skb);
	if (ndescs < 0)
		goto err_out_drop;

	if (unlikely(ionic_maybe_stop_tx(q, ndescs)))
		return NETDEV_TX_BUSY;

	if (skb_is_gso(skb))
		err = ionic_tx_tso(q, skb);
	else
		err = ionic_tx(q, skb);

	if (err)
		goto err_out_drop;

	/* Stop the queue if there aren't descriptors for the next packet.
	 * Since our SG lists per descriptor take care of most of the
	 * possible fragmentation, we don't need many descriptors available.
	 */
	ionic_maybe_stop_tx(q, 4);

	return NETDEV_TX_OK;

err_out_drop:
	q->stop++;
	q->drop++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}