#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <net/dsfield.h>
#include "en.h"
#include "ipoib/ipoib.h"
#include "en_accel/ipsec_rxtx.h"
#include "lib/clock.h"

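/* Stop room: space that must stay free in the SQ before accepting another
 * skb -- one maximum-size WQE plus the NOPs used below to pad the ring edge,
 * so a posted WQE can never wrap past the end of the work queue.
 */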
#define MLX5E_SQ_NOPS_ROOM  MLX5_SEND_WQE_MAX_WQEBBS
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
			    MLX5E_SQ_NOPS_ROOM)

static inline void mlx5e_tx_dma_unmap(struct device *pdev,
				      struct mlx5e_sq_dma *dma)
{
	switch (dma->type) {
	case MLX5E_DMA_MAP_SINGLE:
		dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	case MLX5E_DMA_MAP_PAGE:
		dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	default:
		WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
	}
}

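/* DMA mappings of in-flight WQEs are tracked in a power-of-two FIFO;
 * dma_fifo_pc and dma_fifo_cc are free-running producer/consumer counters
 * that are masked with dma_fifo_mask only on access.
 */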
static inline void mlx5e_dma_push(struct mlx5e_txqsq *sq,
				  dma_addr_t addr,
				  u32 size,
				  enum mlx5e_dma_map_type map_type)
{
	u32 i = sq->dma_fifo_pc & sq->dma_fifo_mask;

	sq->db.dma_fifo[i].addr = addr;
	sq->db.dma_fifo[i].size = size;
	sq->db.dma_fifo[i].type = map_type;
	sq->dma_fifo_pc++;
}

static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
{
	return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
}

static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
{
	int i;

	for (i = 0; i < num_dma; i++) {
		struct mlx5e_sq_dma *last_pushed_dma =
			mlx5e_dma_get(sq, --sq->dma_fifo_pc);

		mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
	}
}

#ifdef CONFIG_MLX5_CORE_EN_DCB
static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb)
{
	int dscp_cp = 0;

	if (skb->protocol == htons(ETH_P_IP))
		dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
	else if (skb->protocol == htons(ETH_P_IPV6))
		dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;

	return priv->dcbx_dp.dscp2prio[dscp_cp];
}
#endif

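/* Select the txq for an skb: the channel comes from the stack's fallback
 * hash, the user priority (up) from the DSCP field when the port trust state
 * is DSCP, otherwise from the VLAN PCP bits; the (channel, tc) pair is then
 * resolved through channel_tc2txq.
 */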
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       void *accel_priv, select_queue_fallback_t fallback)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int channel_ix = fallback(dev, skb);
	u16 num_channels;
	int up = 0;

	if (!netdev_get_num_tc(dev))
		return channel_ix;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
		up = mlx5e_get_dscp_up(priv, skb);
	else
#endif
		if (skb_vlan_tag_present(skb))
			up = skb->vlan_tci >> VLAN_PRIO_SHIFT;

	/* channel_ix can be larger than num_channels since
	 * dev->num_real_tx_queues = num_channels * num_tc
	 */
	num_channels = priv->channels.params.num_channels;
	if (channel_ix >= num_channels)
		channel_ix = reciprocal_scale(channel_ix, num_channels);

	return priv->channel_tc2txq[channel_ix][up];
}

static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
{
#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)

	return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}

static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
{
	struct flow_keys keys;

	if (skb_transport_header_was_set(skb))
		return skb_transport_offset(skb);
	else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
		return keys.control.thoff;
	else
		return mlx5e_skb_l2_header_offset(skb);
}

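/* How many header bytes must be copied inline into the WQE so the device can
 * parse the packet, according to the configured inline mode; the caller later
 * places these bytes in eseg->inline_hdr.
 */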
static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
					struct sk_buff *skb)
{
	u16 hlen;

	switch (mode) {
	case MLX5_INLINE_MODE_NONE:
		return 0;
	case MLX5_INLINE_MODE_TCP_UDP:
		hlen = eth_get_headlen(skb->data, skb_headlen(skb));
		if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
			hlen += VLAN_HLEN;
		break;
	case MLX5_INLINE_MODE_IP:
		/* When transport header is set to zero, it means no transport
		 * header. When transport header is set to 0xff's, it means
		 * transport header wasn't set.
		 */
		if (skb_transport_offset(skb)) {
			hlen = mlx5e_skb_l3_header_offset(skb);
			break;
		}
		/* fall through */
	case MLX5_INLINE_MODE_L2:
	default:
		hlen = mlx5e_skb_l2_header_offset(skb);
	}
	return min_t(u16, hlen, skb_headlen(skb));
}

static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
					    unsigned int *skb_len,
					    unsigned int len)
{
	*skb_len -= len;
	*skb_data += len;
}

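/* Build an inline header with the VLAN tag inserted in software: copy the two
 * MAC addresses (2 * ETH_ALEN bytes), write the 802.1Q tag, then copy the
 * remaining ihs - 12 header bytes from the skb.
 */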
static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs,
				     unsigned char **skb_data,
				     unsigned int *skb_len)
{
	struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
	int cpy1_sz = 2 * ETH_ALEN;
	int cpy2_sz = ihs - cpy1_sz;

	memcpy(vhdr, *skb_data, cpy1_sz);
	mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy1_sz);
	vhdr->h_vlan_proto = skb->vlan_proto;
	vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
	memcpy(&vhdr->h_vlan_encapsulated_proto, *skb_data, cpy2_sz);
	mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy2_sz);
}

static inline void
mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
{
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
		if (skb->encapsulation) {
			eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
					  MLX5_ETH_WQE_L4_INNER_CSUM;
			sq->stats.csum_partial_inner++;
		} else {
			eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
			sq->stats.csum_partial++;
		}
	} else
		sq->stats.csum_none++;
}

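/* For LSO the device replicates the ihs header bytes in front of every
 * segment, so the total wire byte count is skb->len plus
 * (gso_segs - 1) * ihs.
 */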
static inline u16
mlx5e_txwqe_build_eseg_gso(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			   struct mlx5_wqe_eth_seg *eseg, unsigned int *num_bytes)
{
	u16 ihs;

	eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size);

	if (skb->encapsulation) {
		ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
		sq->stats.tso_inner_packets++;
		sq->stats.tso_inner_bytes += skb->len - ihs;
	} else {
		ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
		sq->stats.tso_packets++;
		sq->stats.tso_bytes += skb->len - ihs;
	}

	*num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
	return ihs;
}

static inline int
mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			unsigned char *skb_data, u16 headlen,
			struct mlx5_wqe_data_seg *dseg)
{
	dma_addr_t dma_addr = 0;
	u8  num_dma = 0;
	int i;

	if (headlen) {
		dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
					  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr       = cpu_to_be64(dma_addr);
		dseg->lkey       = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(headlen);

		mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
		num_dma++;
		dseg++;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		int fsz = skb_frag_size(frag);

		dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
					    DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr       = cpu_to_be64(dma_addr);
		dseg->lkey       = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(fsz);

		mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
		num_dma++;
		dseg++;
	}

	return num_dma;

dma_unmap_wqe_err:
	mlx5e_dma_unmap_wqe_err(sq, num_dma);
	return -ENOMEM;
}

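/* Finalize and post a WQE: record the info needed at completion time, write
 * the ctrl segment, stop the queue when stop room is exhausted, and ring the
 * doorbell unless xmit_more indicates more skbs are on the way.
 */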
static inline void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		     u8 opcode, u16 ds_cnt, u32 num_bytes, u8 num_dma,
		     struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi;

	wi->num_bytes = num_bytes;
	wi->num_dma = num_dma;
	wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	wi->skb = skb;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);

	netdev_tx_sent_queue(sq->txq, num_bytes);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	sq->pc += wi->num_wqebbs;
	if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM))) {
		netif_tx_stop_queue(sq->txq);
		sq->stats.stopped++;
	}

	if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
		mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);

	/* fill sq edge with nops to avoid wqe wrap around */
	while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
		sq->db.wqe_info[pi].skb = NULL;
		mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		sq->stats.nop++;
	}
}

static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
				 struct mlx5e_tx_wqe *wqe, u16 pi)
{
	struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];

	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
	struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;

	unsigned char *skb_data = skb->data;
	unsigned int skb_len = skb->len;
	u8  opcode = MLX5_OPCODE_SEND;
	unsigned int num_bytes;
	int num_dma;
	u16 headlen;
	u16 ds_cnt;
	u16 ihs;

	mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);

	if (skb_is_gso(skb)) {
		opcode = MLX5_OPCODE_LSO;
		ihs = mlx5e_txwqe_build_eseg_gso(sq, skb, eseg, &num_bytes);
		sq->stats.packets += skb_shinfo(skb)->gso_segs;
	} else {
		ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
		sq->stats.packets++;
	}
	sq->stats.bytes += num_bytes;
	sq->stats.xmit_more += skb->xmit_more;

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	if (ihs) {
		if (skb_vlan_tag_present(skb)) {
			mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs, &skb_data, &skb_len);
			ihs += VLAN_HLEN;
			sq->stats.added_vlan_packets++;
		} else {
			memcpy(eseg->inline_hdr.start, skb_data, ihs);
			mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
		}
		eseg->inline_hdr.sz = cpu_to_be16(ihs);
		ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS);
	} else if (skb_vlan_tag_present(skb)) {
		eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
		if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD))
			eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN);
		eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
		sq->stats.added_vlan_packets++;
	}

	headlen = skb_len - skb->data_len;
	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
					  (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
	if (unlikely(num_dma < 0))
		goto err_drop;

	mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
			     num_bytes, num_dma, wi, cseg);

	return NETDEV_TX_OK;

err_drop:
	sq->stats.dropped++;
	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}

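/* Main transmit entry point: look up the SQ mapped to the skb's queue, grab
 * the WQE slot at the current producer index, optionally let IPsec rewrite
 * the skb, then hand off to mlx5e_sq_xmit().
 */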
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_txqsq *sq = priv->txq2sq[skb_get_queue_mapping(skb)];
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi = sq->pc & wq->sz_m1;
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);

	memset(wqe, 0, sizeof(*wqe));

#ifdef CONFIG_MLX5_EN_IPSEC
	if (sq->state & BIT(MLX5E_SQ_STATE_IPSEC)) {
		skb = mlx5e_ipsec_handle_tx_skb(dev, wqe, skb);
		if (unlikely(!skb))
			return NETDEV_TX_OK;
	}
#endif

	return mlx5e_sq_xmit(sq, skb, wqe, pi);
}

static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq,
				 struct mlx5_err_cqe *err_cqe)
{
	u32 ci = mlx5_cqwq_get_ci(&sq->cq.wq);

	netdev_err(sq->channel->netdev,
		   "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
		   sq->cq.mcq.cqn, ci, sq->sqn, err_cqe->syndrome,
		   err_cqe->vendor_err_synd);
	mlx5_dump_err_cqe(sq->cq.mdev, err_cqe);
}

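/* Poll up to MLX5E_TX_CQ_POLL_BUDGET TX CQEs. A single CQE can complete
 * several WQEs (last_wqe marks the one it actually points at); the consumer
 * counters are advanced on local copies and written back once at the end.
 * Returns true when the whole budget was consumed, i.e. more work may remain.
 */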
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
	struct mlx5e_txqsq *sq;
	struct mlx5_cqe64 *cqe;
	u32 dma_fifo_cc;
	u32 nbytes;
	u16 npkts;
	u16 sqcc;
	int i;

	sq = container_of(cq, struct mlx5e_txqsq, cq);

	if (unlikely(!MLX5E_TEST_BIT(sq->state, MLX5E_SQ_STATE_ENABLED)))
		return false;

	cqe = mlx5_cqwq_get_cqe(&cq->wq);
	if (!cqe)
		return false;

	npkts = 0;
	nbytes = 0;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	/* avoid dirtying sq cache line every cqe */
	dma_fifo_cc = sq->dma_fifo_cc;

	i = 0;
	do {
		u16 wqe_counter;
		bool last_wqe;

		mlx5_cqwq_pop(&cq->wq);

		wqe_counter = be16_to_cpu(cqe->wqe_counter);

		if (unlikely(cqe->op_own >> 4 == MLX5_CQE_REQ_ERR)) {
			if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
					      &sq->state)) {
				mlx5e_dump_error_cqe(sq,
						     (struct mlx5_err_cqe *)cqe);
				queue_work(cq->channel->priv->wq,
					   &sq->recover.recover_work);
			}
			sq->stats.cqe_err++;
		}

		do {
			struct mlx5e_tx_wqe_info *wi;
			struct sk_buff *skb;
			u16 ci;
			int j;

			last_wqe = (sqcc == wqe_counter);

			ci = sqcc & sq->wq.sz_m1;
			wi = &sq->db.wqe_info[ci];
			skb = wi->skb;

			if (unlikely(!skb)) { /* nop */
				sqcc++;
				continue;
			}

			if (unlikely(skb_shinfo(skb)->tx_flags &
				     SKBTX_HW_TSTAMP)) {
				struct skb_shared_hwtstamps hwts = {};

				hwts.hwtstamp =
					mlx5_timecounter_cyc2time(sq->clock,
								  get_cqe_ts(cqe));
				skb_tstamp_tx(skb, &hwts);
			}

			for (j = 0; j < wi->num_dma; j++) {
				struct mlx5e_sq_dma *dma =
					mlx5e_dma_get(sq, dma_fifo_cc++);

				mlx5e_tx_dma_unmap(sq->pdev, dma);
			}

			npkts++;
			nbytes += wi->num_bytes;
			sqcc += wi->num_wqebbs;
			napi_consume_skb(skb, napi_budget);
		} while (!last_wqe);

	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;

	netdev_tx_completed_queue(sq->txq, npkts, nbytes);

	if (netif_tx_queue_stopped(sq->txq) &&
	    mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,
				   MLX5E_SQ_STOP_ROOM) &&
	    !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
		netif_tx_wake_queue(sq->txq);
		sq->stats.wake++;
	}

	return (i == MLX5E_TX_CQ_POLL_BUDGET);
}

void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
{
	struct mlx5e_tx_wqe_info *wi;
	struct sk_buff *skb;
	u16 ci;
	int i;

	while (sq->cc != sq->pc) {
		ci = sq->cc & sq->wq.sz_m1;
		wi = &sq->db.wqe_info[ci];
		skb = wi->skb;

		if (!skb) { /* nop */
			sq->cc++;
			continue;
		}

		for (i = 0; i < wi->num_dma; i++) {
			struct mlx5e_sq_dma *dma =
				mlx5e_dma_get(sq, sq->dma_fifo_cc++);

			mlx5e_tx_dma_unmap(sq->pdev, dma);
		}

		dev_kfree_skb_any(skb);
		sq->cc += wi->num_wqebbs;
	}
}

#ifdef CONFIG_MLX5_CORE_IPOIB

struct mlx5_wqe_eth_pad {
	u8 rsvd0[16];
};

struct mlx5i_tx_wqe {
	struct mlx5_wqe_ctrl_seg     ctrl;
	struct mlx5_wqe_datagram_seg datagram;
	struct mlx5_wqe_eth_pad      pad;
	struct mlx5_wqe_eth_seg      eth;
};

static inline void
mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
			   struct mlx5_wqe_datagram_seg *dseg)
{
	memcpy(&dseg->av, av, sizeof(struct mlx5_av));
	dseg->av.dqp_dct = cpu_to_be32(dqpn | MLX5_EXTENDED_UD_AV);
	dseg->av.key.qkey.qkey = cpu_to_be32(dqkey);
}

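/* IPoIB variant of mlx5e_sq_xmit(): the flow is the same, but the WQE carries
 * an extra datagram segment holding the address vector (destination QPN and
 * Q_Key), and there is no VLAN handling on an IPoIB link.
 */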
netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			  struct mlx5_av *av, u32 dqpn, u32 dqkey)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi = sq->pc & wq->sz_m1;
	struct mlx5i_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];

	struct mlx5_wqe_ctrl_seg     *cseg = &wqe->ctrl;
	struct mlx5_wqe_datagram_seg *datagram = &wqe->datagram;
	struct mlx5_wqe_eth_seg      *eseg = &wqe->eth;

	unsigned char *skb_data = skb->data;
	unsigned int skb_len = skb->len;
	u8  opcode = MLX5_OPCODE_SEND;
	unsigned int num_bytes;
	int num_dma;
	u16 headlen;
	u16 ds_cnt;
	u16 ihs;

	memset(wqe, 0, sizeof(*wqe));

	mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);

	mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);

	if (skb_is_gso(skb)) {
		opcode = MLX5_OPCODE_LSO;
		ihs = mlx5e_txwqe_build_eseg_gso(sq, skb, eseg, &num_bytes);
		sq->stats.packets += skb_shinfo(skb)->gso_segs;
	} else {
		ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
		sq->stats.packets++;
	}

	sq->stats.bytes += num_bytes;
	sq->stats.xmit_more += skb->xmit_more;

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	if (ihs) {
		memcpy(eseg->inline_hdr.start, skb_data, ihs);
		mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
		eseg->inline_hdr.sz = cpu_to_be16(ihs);
		ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS);
	}

	headlen = skb_len - skb->data_len;
	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
					  (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
	if (unlikely(num_dma < 0))
		goto err_drop;

	mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
			     num_bytes, num_dma, wi, cseg);

	return NETDEV_TX_OK;

err_drop:
	sq->stats.dropped++;
	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}

#endif