#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <net/geneve.h>
#include <net/dsfield.h>
#include "en.h"
#include "en/txrx.h"
#include "ipoib/ipoib.h"
#include "en_accel/en_accel.h"
#include "en/ptp.h"

static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
{
	int i;

	for (i = 0; i < num_dma; i++) {
		struct mlx5e_sq_dma *last_pushed_dma =
			mlx5e_dma_get(sq, --sq->dma_fifo_pc);

		mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
	}
}
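
/* The SQ keeps a DMA FIFO alongside the WQE ring: one entry is pushed per
 * mapped segment (dma_fifo_pc) and entries are popped in order on completion
 * (dma_fifo_cc). On a mapping failure mid-WQE, the helper above unwinds the
 * entries pushed so far in reverse order.
 */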

#ifdef CONFIG_MLX5_CORE_EN_DCB
static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb)
{
	int dscp_cp = 0;

	if (skb->protocol == htons(ETH_P_IP))
		dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
	else if (skb->protocol == htons(ETH_P_IPV6))
		dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;

	return priv->dcbx_dp.dscp2prio[dscp_cp];
}
#endif

static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int up = 0;

	if (!netdev_get_num_tc(dev))
		goto return_txq;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
		up = mlx5e_get_dscp_up(priv, skb);
	else
#endif
		if (skb_vlan_tag_present(skb))
			up = skb_vlan_tag_get_prio(skb);

return_txq:
	return priv->port_ptp_tc2realtxq[up];
}

static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb,
				  u16 htb_maj_id)
{
	u16 classid;

	if ((TC_H_MAJ(skb->priority) >> 16) == htb_maj_id)
		classid = TC_H_MIN(skb->priority);
	else
		classid = READ_ONCE(priv->htb.defcls);

	if (!classid)
		return 0;

	return mlx5e_get_txq_by_classid(priv, classid);
}

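/* Queue selection: the stack's pick is normalized into the regular
 * (channel, tc) txq range; HTB and PTP SQs live beyond num_tc_x_num_ch and
 * are only returned via mlx5e_select_htb_queue()/mlx5e_select_ptpsq().
 * The final index comes from the channel_tc2realtxq map, which is assumed to
 * be populated when the channels are activated.
 */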
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       struct net_device *sb_dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int num_tc_x_num_ch;
	int txq_ix;
	int up = 0;
	int ch_ix;

	/* Read once so that all checks below use a consistent value. */
	num_tc_x_num_ch = READ_ONCE(priv->num_tc_x_num_ch);
	if (unlikely(dev->real_num_tx_queues > num_tc_x_num_ch)) {
		struct mlx5e_ptp *ptp_channel;

		/* Acquire pairs with the release store of htb.maj_id and
		 * orders it before the read of htb.defcls below.
		 */
		u16 htb_maj_id = smp_load_acquire(&priv->htb.maj_id);

		if (unlikely(htb_maj_id)) {
			txq_ix = mlx5e_select_htb_queue(priv, skb, htb_maj_id);
			if (txq_ix > 0)
				return txq_ix;
		}

		ptp_channel = READ_ONCE(priv->channels.ptp);
		if (unlikely(ptp_channel &&
			     test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state) &&
			     mlx5e_use_ptpsq(skb)))
			return mlx5e_select_ptpsq(dev, skb);

		txq_ix = netdev_pick_tx(dev, skb, NULL);
		/* netdev_pick_tx() may also land on PTP or HTB txqs, which the
		 * driver selects only via the helpers above. Fold such picks
		 * back into the regular range.
		 */
		if (unlikely(txq_ix >= num_tc_x_num_ch))
			txq_ix %= num_tc_x_num_ch;
	} else {
		txq_ix = netdev_pick_tx(dev, skb, NULL);
	}

	if (!netdev_get_num_tc(dev))
		return txq_ix;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
		up = mlx5e_get_dscp_up(priv, skb);
	else
#endif
		if (skb_vlan_tag_present(skb))
			up = skb_vlan_tag_get_prio(skb);

	/* Normalize the picked txq_ix to its channel, then return the real
	 * txq matching both the channel and the packet's user priority.
	 */
	ch_ix = priv->txq2sq[txq_ix]->ch_ix;

	return priv->channel_tc2realtxq[ch_ix][up];
}

static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
{
#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)

	return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}

static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
{
	if (skb_transport_header_was_set(skb))
		return skb_transport_offset(skb);
	else
		return mlx5e_skb_l2_header_offset(skb);
}

static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
					struct sk_buff *skb)
{
	u16 hlen;

	switch (mode) {
	case MLX5_INLINE_MODE_NONE:
		return 0;
	case MLX5_INLINE_MODE_TCP_UDP:
		hlen = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb));
		if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
			hlen += VLAN_HLEN;
		break;
	case MLX5_INLINE_MODE_IP:
		hlen = mlx5e_skb_l3_header_offset(skb);
		break;
	case MLX5_INLINE_MODE_L2:
	default:
		hlen = mlx5e_skb_l2_header_offset(skb);
	}
	return min_t(u16, hlen, skb_headlen(skb));
}
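
/* Example of the calculation above: with MLX5_INLINE_MODE_TCP_UDP and an
 * untagged TCP/IPv4 frame, eth_get_headlen() walks the headers and returns
 * ETH_HLEN plus the IP and TCP header lengths, so roughly 54 bytes would be
 * inlined; the result is always clamped to the skb's linear length.
 */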

static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
{
	struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
	int cpy1_sz = 2 * ETH_ALEN;
	int cpy2_sz = ihs - cpy1_sz;

	memcpy(vhdr, skb->data, cpy1_sz);
	vhdr->h_vlan_proto = skb->vlan_proto;
	vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
	memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
}

static void
ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			    struct mlx5_wqe_eth_seg *eseg)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
	if (xo->inner_ipproto) {
		eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM | MLX5_ETH_WQE_L3_INNER_CSUM;
	} else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
		sq->stats->csum_partial_inner++;
	}
}

static inline void
mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			    struct mlx5e_accel_tx_state *accel,
			    struct mlx5_wqe_eth_seg *eseg)
{
	if (unlikely(mlx5e_ipsec_eseg_meta(eseg))) {
		ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
		return;
	}

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
		if (skb->encapsulation) {
			eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
					  MLX5_ETH_WQE_L4_INNER_CSUM;
			sq->stats->csum_partial_inner++;
		} else {
			eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
			sq->stats->csum_partial++;
		}
#ifdef CONFIG_MLX5_EN_TLS
	} else if (unlikely(accel && accel->tls.tls_tisn)) {
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
		sq->stats->csum_partial++;
#endif
	} else
		sq->stats->csum_none++;
}
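
/* cs_flags summary: L3_CSUM/L4_CSUM request checksum offload for the outer
 * headers, and the *_INNER_* variants cover the inner headers of encapsulated
 * traffic. For TLS-offloaded skbs the flags are set even without
 * CHECKSUM_PARTIAL, presumably because the device rewrites the payload and a
 * software checksum would be stale.
 */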

static inline u16
mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb)
{
	struct mlx5e_sq_stats *stats = sq->stats;
	u16 ihs;

	if (skb->encapsulation) {
		ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
		stats->tso_inner_packets++;
		stats->tso_inner_bytes += skb->len - ihs;
	} else {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
			ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
		else
			ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
		stats->tso_packets++;
		stats->tso_bytes += skb->len - ihs;
	}

	return ihs;
}
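
/* For LSO, ihs is the number of header bytes inlined into the WQE: everything
 * up to and including the (inner) TCP header, or the UDP header for
 * GSO_UDP_L4, counted from the start of the frame.
 */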

static inline int
mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			unsigned char *skb_data, u16 headlen,
			struct mlx5_wqe_data_seg *dseg)
{
	dma_addr_t dma_addr = 0;
	u8 num_dma = 0;
	int i;

	if (headlen) {
		dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
					  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr = cpu_to_be64(dma_addr);
		dseg->lkey = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(headlen);

		mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
		num_dma++;
		dseg++;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int fsz = skb_frag_size(frag);

		dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
					    DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr = cpu_to_be64(dma_addr);
		dseg->lkey = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(fsz);

		mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
		num_dma++;
		dseg++;
	}

	return num_dma;

dma_unmap_wqe_err:
	mlx5e_dma_unmap_wqe_err(sq, num_dma);
	return -ENOMEM;
}
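
/* Each gather entry above is a 16-byte data segment holding a DMA address,
 * an lkey and a byte count: one for the skb's linear part (if any) and one
 * per page fragment. Every mapping is also pushed to the DMA FIFO so the
 * completion path can unmap it later.
 */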

struct mlx5e_tx_attr {
	u32 num_bytes;
	u16 headlen;
	u16 ihs;
	__be16 mss;
	u16 insz;
	u8 opcode;
};

struct mlx5e_tx_wqe_attr {
	u16 ds_cnt;
	u16 ds_cnt_inl;
	u16 ds_cnt_ids;
	u8 num_wqebbs;
};
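
/* Sizing sketch (illustrative numbers, not taken from the PRM): ds_cnt counts
 * 16-byte segments - the fixed ctrl/eth part, optional inline-header and
 * accel-ids segments, plus one gather entry per headlen/fragment. A 64-byte
 * WQEBB holds MLX5_SEND_WQEBB_NUM_DS = 4 such segments, so e.g. ds_cnt = 6
 * rounds up to num_wqebbs = 2.
 */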

static u8
mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			 struct mlx5e_accel_tx_state *accel)
{
	u8 mode;

#ifdef CONFIG_MLX5_EN_TLS
	if (accel && accel->tls.tls_tisn)
		return MLX5_INLINE_MODE_TCP_UDP;
#endif

	mode = sq->min_inline_mode;

	if (skb_vlan_tag_present(skb) &&
	    test_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state))
		mode = max_t(u8, MLX5_INLINE_MODE_L2, mode);

	return mode;
}

static void mlx5e_sq_xmit_prepare(struct mlx5e_txqsq *sq, struct sk_buff *skb,
				  struct mlx5e_accel_tx_state *accel,
				  struct mlx5e_tx_attr *attr)
{
	struct mlx5e_sq_stats *stats = sq->stats;

	if (skb_is_gso(skb)) {
		u16 ihs = mlx5e_tx_get_gso_ihs(sq, skb);

		*attr = (struct mlx5e_tx_attr) {
			.opcode    = MLX5_OPCODE_LSO,
			.mss       = cpu_to_be16(skb_shinfo(skb)->gso_size),
			.ihs       = ihs,
			.num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs,
			.headlen   = skb_headlen(skb) - ihs,
		};

		stats->packets += skb_shinfo(skb)->gso_segs;
	} else {
		u8 mode = mlx5e_tx_wqe_inline_mode(sq, skb, accel);
		u16 ihs = mlx5e_calc_min_inline(mode, skb);

		*attr = (struct mlx5e_tx_attr) {
			.opcode    = MLX5_OPCODE_SEND,
			.mss       = cpu_to_be16(0),
			.ihs       = ihs,
			.num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN),
			.headlen   = skb_headlen(skb) - ihs,
		};

		stats->packets++;
	}

	attr->insz = mlx5e_accel_tx_ids_len(sq, accel);
	stats->bytes += attr->num_bytes;
}

static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_attr *attr,
				   struct mlx5e_tx_wqe_attr *wqe_attr)
{
	u16 ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT;
	u16 ds_cnt_inl = 0;
	u16 ds_cnt_ids = 0;

	if (attr->insz)
		ds_cnt_ids = DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + attr->insz,
					  MLX5_SEND_WQE_DS);

	ds_cnt += !!attr->headlen + skb_shinfo(skb)->nr_frags + ds_cnt_ids;
	if (attr->ihs) {
		u16 inl = attr->ihs - INL_HDR_START_SZ;

		if (skb_vlan_tag_present(skb))
			inl += VLAN_HLEN;

		ds_cnt_inl = DIV_ROUND_UP(inl, MLX5_SEND_WQE_DS);
		ds_cnt += ds_cnt_inl;
	}

	*wqe_attr = (struct mlx5e_tx_wqe_attr) {
		.ds_cnt     = ds_cnt,
		.ds_cnt_inl = ds_cnt_inl,
		.ds_cnt_ids = ds_cnt_ids,
		.num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS),
	};
}

static void mlx5e_tx_skb_update_hwts_flags(struct sk_buff *skb)
{
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
}

static void mlx5e_tx_check_stop(struct mlx5e_txqsq *sq)
{
	if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room))) {
		netif_tx_stop_queue(sq->txq);
		sq->stats->stopped++;
	}
}

static inline void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		     const struct mlx5e_tx_attr *attr,
		     const struct mlx5e_tx_wqe_attr *wqe_attr, u8 num_dma,
		     struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg,
		     bool xmit_more)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	bool send_doorbell;

	*wi = (struct mlx5e_tx_wqe_info) {
		.skb = skb,
		.num_bytes = attr->num_bytes,
		.num_dma = num_dma,
		.num_wqebbs = wqe_attr->num_wqebbs,
		.num_fifo_pkts = 0,
	};

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | attr->opcode);
	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | wqe_attr->ds_cnt);

	mlx5e_tx_skb_update_hwts_flags(skb);

	sq->pc += wi->num_wqebbs;

	mlx5e_tx_check_stop(sq);

	if (unlikely(sq->ptpsq)) {
		mlx5e_skb_cb_hwtstamp_init(skb);
		mlx5e_skb_fifo_push(&sq->ptpsq->skb_fifo, skb);
		skb_get(skb);
	}

	send_doorbell = __netdev_tx_sent_queue(sq->txq, attr->num_bytes, xmit_more);
	if (send_doorbell)
		mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
}
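
/* Doorbell batching: __netdev_tx_sent_queue() returns true when the doorbell
 * must be rung now - either the stack did not signal xmit_more or BQL is
 * stopping the queue - so under a burst the UAR write happens once per batch
 * rather than once per packet.
 */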

static void
mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		  const struct mlx5e_tx_attr *attr, const struct mlx5e_tx_wqe_attr *wqe_attr,
		  struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_eth_seg  *eseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe_info *wi;

	struct mlx5e_sq_stats *stats = sq->stats;
	int num_dma;

	stats->xmit_more += xmit_more;

	/* fill wqe */
	wi   = &sq->db.wqe_info[pi];
	cseg = &wqe->ctrl;
	eseg = &wqe->eth;
	dseg =  wqe->data;

	eseg->mss = attr->mss;

	if (attr->ihs) {
		if (skb_vlan_tag_present(skb)) {
			eseg->inline_hdr.sz |= cpu_to_be16(attr->ihs + VLAN_HLEN);
			mlx5e_insert_vlan(eseg->inline_hdr.start, skb, attr->ihs);
			stats->added_vlan_packets++;
		} else {
			eseg->inline_hdr.sz |= cpu_to_be16(attr->ihs);
			memcpy(eseg->inline_hdr.start, skb->data, attr->ihs);
		}
		dseg += wqe_attr->ds_cnt_inl;
	} else if (skb_vlan_tag_present(skb)) {
		eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
		if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD))
			eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN);
		eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
		stats->added_vlan_packets++;
	}

	dseg += wqe_attr->ds_cnt_ids;
	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr->ihs,
					  attr->headlen, dseg);
	if (unlikely(num_dma < 0))
		goto err_drop;

	mlx5e_txwqe_complete(sq, skb, attr, wqe_attr, num_dma, wi, cseg, xmit_more);

	return;

err_drop:
	stats->dropped++;
	dev_kfree_skb_any(skb);
}

static bool mlx5e_tx_skb_supports_mpwqe(struct sk_buff *skb, struct mlx5e_tx_attr *attr)
{
	return !skb_is_nonlinear(skb) && !skb_vlan_tag_present(skb) && !attr->ihs &&
	       !attr->insz;
}
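
/* Only simple frames are merged into a TX MPWQE session: linear skbs with no
 * VLAN insertion, no inlined headers and no accel metadata, so each packet is
 * described by a single data segment sharing the session's eth segment.
 */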

static bool mlx5e_tx_mpwqe_same_eseg(struct mlx5e_txqsq *sq, struct mlx5_wqe_eth_seg *eseg)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;

	/* Assumes the session is already running and has at least one packet. */
	return !memcmp(&session->wqe->eth, eseg, MLX5E_ACCEL_ESEG_LEN);
}

static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
					 struct mlx5_wqe_eth_seg *eseg)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	struct mlx5e_tx_wqe *wqe;
	u16 pi;

	pi = mlx5e_txqsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS);
	wqe = MLX5E_TX_FETCH_WQE(sq, pi);
	net_prefetchw(wqe->data);

	*session = (struct mlx5e_tx_mpwqe) {
		.wqe = wqe,
		.bytes_count = 0,
		.ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT,
		.pkt_count = 0,
		.inline_on = 0,
	};

	memcpy(&session->wqe->eth, eseg, MLX5E_ACCEL_ESEG_LEN);

	sq->stats->mpwqe_blks++;
}

static bool mlx5e_tx_mpwqe_session_is_active(struct mlx5e_txqsq *sq)
{
	return sq->mpwqe.wqe;
}

static void mlx5e_tx_mpwqe_add_dseg(struct mlx5e_txqsq *sq, struct mlx5e_xmit_data *txd)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	struct mlx5_wqe_data_seg *dseg;

	dseg = (struct mlx5_wqe_data_seg *)session->wqe + session->ds_count;

	session->pkt_count++;
	session->bytes_count += txd->len;

	dseg->addr = cpu_to_be64(txd->dma_addr);
	dseg->byte_count = cpu_to_be32(txd->len);
	dseg->lkey = sq->mkey_be;
	session->ds_count++;

	sq->stats->mpwqe_pkts++;
}

static struct mlx5_wqe_ctrl_seg *mlx5e_tx_mpwqe_session_complete(struct mlx5e_txqsq *sq)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	u8 ds_count = session->ds_count;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5e_tx_wqe_info *wi;
	u16 pi;

	cseg = &session->wqe->ctrl;
	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_ENHANCED_MPSW);
	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_count);

	pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
	wi = &sq->db.wqe_info[pi];
	*wi = (struct mlx5e_tx_wqe_info) {
		.skb = NULL,
		.num_bytes = session->bytes_count,
		.num_wqebbs = DIV_ROUND_UP(ds_count, MLX5_SEND_WQEBB_NUM_DS),
		.num_dma = session->pkt_count,
		.num_fifo_pkts = session->pkt_count,
	};

	sq->pc += wi->num_wqebbs;

	session->wqe = NULL;

	mlx5e_tx_check_stop(sq);

	return cseg;
}
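
/* Closing a session turns the accumulated segments into one ENHANCED_MPSW
 * WQE. A single wqe_info entry covers the whole session: num_dma mappings to
 * unmap and num_fifo_pkts skbs to pop from the skb FIFO on completion.
 */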

static void
mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		    struct mlx5_wqe_eth_seg *eseg, bool xmit_more)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5e_xmit_data txd;

	if (!mlx5e_tx_mpwqe_session_is_active(sq)) {
		mlx5e_tx_mpwqe_session_start(sq, eseg);
	} else if (!mlx5e_tx_mpwqe_same_eseg(sq, eseg)) {
		mlx5e_tx_mpwqe_session_complete(sq);
		mlx5e_tx_mpwqe_session_start(sq, eseg);
	}

	sq->stats->xmit_more += xmit_more;

	txd.data = skb->data;
	txd.len = skb->len;

	txd.dma_addr = dma_map_single(sq->pdev, txd.data, txd.len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sq->pdev, txd.dma_addr)))
		goto err_unmap;
	mlx5e_dma_push(sq, txd.dma_addr, txd.len, MLX5E_DMA_MAP_SINGLE);

	mlx5e_skb_fifo_push(&sq->db.skb_fifo, skb);

	mlx5e_tx_mpwqe_add_dseg(sq, &txd);

	mlx5e_tx_skb_update_hwts_flags(skb);

	if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe))) {
		/* Might stop the queue and affect the retval of __netdev_tx_sent_queue. */
		cseg = mlx5e_tx_mpwqe_session_complete(sq);

		if (__netdev_tx_sent_queue(sq->txq, txd.len, xmit_more))
			mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
	} else if (__netdev_tx_sent_queue(sq->txq, txd.len, xmit_more)) {
		/* Might stop the queue, but we were asked to ring the doorbell anyway. */
		cseg = mlx5e_tx_mpwqe_session_complete(sq);

		mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
	}

	return;

err_unmap:
	mlx5e_dma_unmap_wqe_err(sq, 1);
	sq->stats->dropped++;
	dev_kfree_skb_any(skb);
}

void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
{
	/* Nothing to do unless an MPWQE session is still open. */
	if (unlikely(mlx5e_tx_mpwqe_session_is_active(sq)))
		mlx5e_tx_mpwqe_session_complete(sq);
}

static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
				   struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
				   struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
	mlx5e_accel_tx_eseg(priv, skb, eseg, ihs);
	mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
}

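/* ndo_start_xmit entry point: resolve the SQ from the skb's queue mapping,
 * let the accel layer (TLS/IPsec) preprocess the skb, then post either an
 * MPWQE session packet or a regular SEND/LSO WQE.
 */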
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_accel_tx_state accel = {};
	struct mlx5e_tx_wqe_attr wqe_attr;
	struct mlx5e_tx_attr attr;
	struct mlx5e_tx_wqe *wqe;
	struct mlx5e_txqsq *sq;
	u16 pi;

	sq = priv->txq2sq[skb_get_queue_mapping(skb)];
	if (unlikely(!sq)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* May send SKBs and WQEs. */
	if (unlikely(!mlx5e_accel_tx_begin(dev, sq, skb, &accel)))
		return NETDEV_TX_OK;

	mlx5e_sq_xmit_prepare(sq, skb, &accel, &attr);

	if (test_bit(MLX5E_SQ_STATE_MPWQE, &sq->state)) {
		if (mlx5e_tx_skb_supports_mpwqe(skb, &attr)) {
			struct mlx5_wqe_eth_seg eseg = {};

			mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg, attr.ihs);
			mlx5e_sq_xmit_mpwqe(sq, skb, &eseg, netdev_xmit_more());
			return NETDEV_TX_OK;
		}

		mlx5e_tx_mpwqe_ensure_complete(sq);
	}

	mlx5e_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
	pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
	wqe = MLX5E_TX_FETCH_WQE(sq, pi);

	/* May update the WQE, but may not post other WQEs. */
	mlx5e_accel_tx_finish(sq, wqe, &accel,
			      (struct mlx5_wqe_inline_seg *)(wqe->data + wqe_attr.ds_cnt_inl));
	mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth, attr.ihs);
	mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, netdev_xmit_more());

	return NETDEV_TX_OK;
}

void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit_more)
{
	struct mlx5e_tx_wqe_attr wqe_attr;
	struct mlx5e_tx_attr attr;
	struct mlx5e_tx_wqe *wqe;
	u16 pi;

	mlx5e_sq_xmit_prepare(sq, skb, NULL, &attr);
	mlx5e_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
	pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
	wqe = MLX5E_TX_FETCH_WQE(sq, pi);
	mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, &wqe->eth);
	mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, xmit_more);
}

static void mlx5e_tx_wi_dma_unmap(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi,
				  u32 *dma_fifo_cc)
{
	int i;

	for (i = 0; i < wi->num_dma; i++) {
		struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);

		mlx5e_tx_dma_unmap(sq->pdev, dma);
	}
}

static void mlx5e_consume_skb(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			      struct mlx5_cqe64 *cqe, int napi_budget)
{
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		struct skb_shared_hwtstamps hwts = {};
		u64 ts = get_cqe_ts(cqe);

		hwts.hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, ts);
		if (sq->ptpsq)
			mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_CQE_HWTSTAMP,
						      hwts.hwtstamp, sq->ptpsq->cq_stats);
		else
			skb_tstamp_tx(skb, &hwts);
	}

	napi_consume_skb(skb, napi_budget);
}

static void mlx5e_tx_wi_consume_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi,
					  struct mlx5_cqe64 *cqe, int napi_budget)
{
	int i;

	for (i = 0; i < wi->num_fifo_pkts; i++) {
		struct sk_buff *skb = mlx5e_skb_fifo_pop(&sq->db.skb_fifo);

		mlx5e_consume_skb(sq, skb, cqe, napi_budget);
	}
}

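/* TX completion path: processes up to MLX5E_TX_CQ_POLL_BUDGET CQEs per NAPI
 * poll, unmapping DMA, freeing skbs and updating BQL, and wakes the txq once
 * enough room is available again. Returns true if the budget was exhausted
 * and more completions may be pending.
 */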
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
	struct mlx5e_sq_stats *stats;
	struct mlx5e_txqsq *sq;
	struct mlx5_cqe64 *cqe;
	u32 dma_fifo_cc;
	u32 nbytes;
	u16 npkts;
	u16 sqcc;
	int i;

	sq = container_of(cq, struct mlx5e_txqsq, cq);

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
		return false;

	cqe = mlx5_cqwq_get_cqe(&cq->wq);
	if (!cqe)
		return false;

	stats = sq->stats;

	npkts = 0;
	nbytes = 0;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	/* avoid dirtying sq cache line every cqe */
	dma_fifo_cc = sq->dma_fifo_cc;

	i = 0;
	do {
		struct mlx5e_tx_wqe_info *wi;
		u16 wqe_counter;
		bool last_wqe;
		u16 ci;

		mlx5_cqwq_pop(&cq->wq);

		wqe_counter = be16_to_cpu(cqe->wqe_counter);

		do {
			last_wqe = (sqcc == wqe_counter);

			ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
			wi = &sq->db.wqe_info[ci];

			sqcc += wi->num_wqebbs;

			if (likely(wi->skb)) {
				mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
				mlx5e_consume_skb(sq, wi->skb, cqe, napi_budget);

				npkts++;
				nbytes += wi->num_bytes;
				continue;
			}

			if (unlikely(mlx5e_ktls_tx_try_handle_resync_dump_comp(sq, wi,
									       &dma_fifo_cc)))
				continue;

			if (wi->num_fifo_pkts) {
				mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
				mlx5e_tx_wi_consume_fifo_skbs(sq, wi, cqe, napi_budget);

				npkts += wi->num_fifo_pkts;
				nbytes += wi->num_bytes;
			}
		} while (!last_wqe);

		if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
			if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
					      &sq->state)) {
				mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
						     (struct mlx5_err_cqe *)cqe);
				mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
				queue_work(cq->priv->wq, &sq->recover_work);
			}
			stats->cqe_err++;
		}

	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));

	stats->cqes += i;

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;

	netdev_tx_completed_queue(sq->txq, npkts, nbytes);

	if (netif_tx_queue_stopped(sq->txq) &&
	    mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
	    !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
		netif_tx_wake_queue(sq->txq);
		stats->wake++;
	}

	return (i == MLX5E_TX_CQ_POLL_BUDGET);
}

static void mlx5e_tx_wi_kfree_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi)
{
	int i;

	for (i = 0; i < wi->num_fifo_pkts; i++)
		dev_kfree_skb_any(mlx5e_skb_fifo_pop(&sq->db.skb_fifo));
}

void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
{
	struct mlx5e_tx_wqe_info *wi;
	u32 dma_fifo_cc, nbytes = 0;
	u16 ci, sqcc, npkts = 0;

	sqcc = sq->cc;
	dma_fifo_cc = sq->dma_fifo_cc;

	while (sqcc != sq->pc) {
		ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
		wi = &sq->db.wqe_info[ci];

		sqcc += wi->num_wqebbs;

		if (likely(wi->skb)) {
			mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
			dev_kfree_skb_any(wi->skb);

			npkts++;
			nbytes += wi->num_bytes;
			continue;
		}

		if (unlikely(mlx5e_ktls_tx_try_handle_resync_dump_comp(sq, wi, &dma_fifo_cc)))
			continue;

		if (wi->num_fifo_pkts) {
			mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
			mlx5e_tx_wi_kfree_fifo_skbs(sq, wi);

			npkts += wi->num_fifo_pkts;
			nbytes += wi->num_bytes;
		}
	}

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;

	netdev_tx_completed_queue(sq->txq, npkts, nbytes);
}

#ifdef CONFIG_MLX5_CORE_IPOIB
static inline void
mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
			   struct mlx5_wqe_datagram_seg *dseg)
{
	memcpy(&dseg->av, av, sizeof(struct mlx5_av));
	dseg->av.dqp_dct = cpu_to_be32(dqpn | MLX5_EXTENDED_UD_AV);
	dseg->av.key.qkey.qkey = cpu_to_be32(dqkey);
}

static void mlx5i_sq_calc_wqe_attr(struct sk_buff *skb,
				   const struct mlx5e_tx_attr *attr,
				   struct mlx5e_tx_wqe_attr *wqe_attr)
{
	u16 ds_cnt = sizeof(struct mlx5i_tx_wqe) / MLX5_SEND_WQE_DS;
	u16 ds_cnt_inl = 0;

	ds_cnt += !!attr->headlen + skb_shinfo(skb)->nr_frags;

	if (attr->ihs) {
		u16 inl = attr->ihs - INL_HDR_START_SZ;

		ds_cnt_inl = DIV_ROUND_UP(inl, MLX5_SEND_WQE_DS);
		ds_cnt += ds_cnt_inl;
	}

	*wqe_attr = (struct mlx5e_tx_wqe_attr) {
		.ds_cnt     = ds_cnt,
		.ds_cnt_inl = ds_cnt_inl,
		.num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS),
	};
}

void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		   struct mlx5_av *av, u32 dqpn, u32 dqkey, bool xmit_more)
{
	struct mlx5e_tx_wqe_attr wqe_attr;
	struct mlx5e_tx_attr attr;
	struct mlx5i_tx_wqe *wqe;

	struct mlx5_wqe_datagram_seg *datagram;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_eth_seg  *eseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe_info *wi;

	struct mlx5e_sq_stats *stats = sq->stats;
	int num_dma;
	u16 pi;

	mlx5e_sq_xmit_prepare(sq, skb, NULL, &attr);
	mlx5i_sq_calc_wqe_attr(skb, &attr, &wqe_attr);

	pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
	wqe = MLX5I_SQ_FETCH_WQE(sq, pi);

	stats->xmit_more += xmit_more;

	/* fill wqe */
	wi       = &sq->db.wqe_info[pi];
	cseg     = &wqe->ctrl;
	datagram = &wqe->datagram;
	eseg     = &wqe->eth;
	dseg     =  wqe->data;

	mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);

	mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, eseg);

	eseg->mss = attr.mss;

	if (attr.ihs) {
		memcpy(eseg->inline_hdr.start, skb->data, attr.ihs);
		eseg->inline_hdr.sz = cpu_to_be16(attr.ihs);
		dseg += wqe_attr.ds_cnt_inl;
	}

	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr.ihs,
					  attr.headlen, dseg);
	if (unlikely(num_dma < 0))
		goto err_drop;

	mlx5e_txwqe_complete(sq, skb, &attr, &wqe_attr, num_dma, wi, cseg, xmit_more);

	return;

err_drop:
	stats->dropped++;
	dev_kfree_skb_any(skb);
}
#endif