/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include "en.h"

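/* Room, in WQE basic blocks (WQEBBs), that must stay free before we stop
 * the txq: space for one maximal WQE plus the NOPs used to pad the queue
 * up to the edge so that no real WQE ever wraps around the ring.
 */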
#define MLX5E_SQ_NOPS_ROOM  MLX5_SEND_WQE_MAX_WQEBBS
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
			    MLX5E_SQ_NOPS_ROOM)

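/* Post a NOP control segment at the current producer position. Used both
 * to kick the hardware (notify_hw) and to pad the SQ out to the edge so a
 * real WQE never wraps past the end of the ring.
 */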
void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
{
	struct mlx5_wq_cyc *wq = &sq->wq;

	u16 pi = sq->pc & wq->sz_m1;
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);

	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | 0x01);

	sq->skb[pi] = NULL;
	sq->pc++;
	sq->stats.nop++;

	if (notify_hw) {
		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
		mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
	}
}

static inline void mlx5e_tx_dma_unmap(struct device *pdev,
				      struct mlx5e_sq_dma *dma)
{
	switch (dma->type) {
	case MLX5E_DMA_MAP_SINGLE:
		dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	case MLX5E_DMA_MAP_PAGE:
		dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	default:
		WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
	}
}

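/* The dma_fifo is a power-of-two ring that records every DMA mapping in
 * submission order; the completion path (and the error unwind) walks it
 * with its own counter to unmap in the same order, so the WQEs themselves
 * never need to carry the mapping info.
 */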
static inline void mlx5e_dma_push(struct mlx5e_sq *sq,
				  dma_addr_t addr,
				  u32 size,
				  enum mlx5e_dma_map_type map_type)
{
	sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr;
	sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size;
	sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].type = map_type;
	sq->dma_fifo_pc++;
}

static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_sq *sq, u32 i)
{
	return &sq->dma_fifo[i & sq->dma_fifo_mask];
}

static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, u8 num_dma)
{
	int i;

	for (i = 0; i < num_dma; i++) {
		struct mlx5e_sq_dma *last_pushed_dma =
			mlx5e_dma_get(sq, --sq->dma_fifo_pc);

		mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
	}
}

u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       void *accel_priv, select_queue_fallback_t fallback)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int channel_ix = fallback(dev, skb);
	int up = 0;

	if (!netdev_get_num_tc(dev))
		return channel_ix;

	if (skb_vlan_tag_present(skb))
		up = skb->vlan_tci >> VLAN_PRIO_SHIFT;

	/* channel_ix can be larger than num_channels since
	 * dev->num_real_tx_queues = num_channels * num_tc
	 */
	if (channel_ix >= priv->params.num_channels)
		channel_ix = reciprocal_scale(channel_ix,
					      priv->params.num_channels);

	return priv->channeltc_to_txq_map[channel_ix][up];
}

static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
{
#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)

	return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}

static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
{
	struct flow_keys keys;

	if (skb_transport_header_was_set(skb))
		return skb_transport_offset(skb);
	else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
		return keys.control.thoff;
	else
		return mlx5e_skb_l2_header_offset(skb);
}

static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
						 struct sk_buff *skb)
{
	int hlen;

	switch (mode) {
	case MLX5_INLINE_MODE_TCP_UDP:
		hlen = eth_get_headlen(skb->data, skb_headlen(skb));
		if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
			hlen += VLAN_HLEN;
		return hlen;
	case MLX5_INLINE_MODE_IP:
		/* When transport header is set to zero, it means no transport
		 * header. When transport header is set to 0xff's, it means
		 * transport header wasn't set.
		 */
		if (skb_transport_offset(skb))
			return mlx5e_skb_l3_header_offset(skb);
		/* fall through */
	case MLX5_INLINE_MODE_L2:
	default:
		return mlx5e_skb_l2_header_offset(skb);
	}
}

static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
					    struct sk_buff *skb, bool bf)
{
	/* Some NIC TX decisions, e.g loopback, are based on the packet
	 * headers and occur before the data gather.
	 * Therefore these headers must be copied into the WQE
	 */
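	/* bf means the send is BlueFlame-eligible: if the whole linear part
	 * fits within max_inline, inline it all so the entire WQE can be
	 * written directly to the doorbell page instead of being fetched by
	 * the device via DMA.
	 */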
	if (bf) {
		u16 ihs = skb_headlen(skb);

		if (skb_vlan_tag_present(skb))
			ihs += VLAN_HLEN;

		if (ihs <= sq->max_inline)
			return skb_headlen(skb);
	}
	return mlx5e_calc_min_inline(sq->min_inline_mode, skb);
}

static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
					    unsigned int *skb_len,
					    unsigned int len)
{
	*skb_len -= len;
	*skb_data += len;
}

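/* Copy the headers into the WQE inline part with a VLAN tag inserted in
 * place: the two MAC addresses first, then the reconstructed 802.1Q tag,
 * then the remaining ihs bytes of headers, advancing skb_data/skb_len
 * past everything that was consumed.
 */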
static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs,
				     unsigned char **skb_data,
				     unsigned int *skb_len)
{
	struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
	int cpy1_sz = 2 * ETH_ALEN;
	int cpy2_sz = ihs - cpy1_sz;

	memcpy(vhdr, *skb_data, cpy1_sz);
	mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy1_sz);
	vhdr->h_vlan_proto = skb->vlan_proto;
	vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
	memcpy(&vhdr->h_vlan_encapsulated_proto, *skb_data, cpy2_sz);
	mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy2_sz);
}

static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
{
	struct mlx5_wq_cyc *wq = &sq->wq;

	u16 pi = sq->pc & wq->sz_m1;
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5e_tx_wqe_info *wi = &sq->wqe_info[pi];

	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
	struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
	struct mlx5_wqe_data_seg *dseg;

	unsigned char *skb_data = skb->data;
	unsigned int skb_len = skb->len;
	u8 opcode = MLX5_OPCODE_SEND;
	dma_addr_t dma_addr = 0;
	unsigned int num_bytes;
	bool bf = false;
	u16 headlen;
	u16 ds_cnt;
	u16 ihs;
	int i;

	memset(wqe, 0, sizeof(*wqe));

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
		if (skb->encapsulation) {
			eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
					  MLX5_ETH_WQE_L4_INNER_CSUM;
			sq->stats.csum_partial_inner++;
		} else {
			eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
		}
	} else
		sq->stats.csum_none++;

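	/* Replenish the BlueFlame budget only when the consumer counter has
	 * moved since the last xmit: a full budget when the SQ has drained
	 * completely (cc == pc), none otherwise.
	 */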
	if (sq->cc != sq->prev_cc) {
		sq->prev_cc = sq->cc;
		sq->bf_budget = (sq->cc == sq->pc) ? MLX5E_SQ_BF_BUDGET : 0;
	}

	if (skb_is_gso(skb)) {
		eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
		opcode = MLX5_OPCODE_LSO;

		if (skb->encapsulation) {
			ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
			sq->stats.tso_inner_packets++;
			sq->stats.tso_inner_bytes += skb->len - ihs;
		} else {
			ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
			sq->stats.tso_packets++;
			sq->stats.tso_bytes += skb->len - ihs;
		}

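		/* Account the bytes that will actually go on the wire: the
		 * skb length plus one replicated header for every additional
		 * GSO segment.
		 */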
		num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
	} else {
		bf = sq->bf_budget &&
		     !skb->xmit_more &&
		     !skb_shinfo(skb)->nr_frags;
		ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
	}

	wi->num_bytes = num_bytes;

	if (skb_vlan_tag_present(skb)) {
		mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs, &skb_data,
				  &skb_len);
		ihs += VLAN_HLEN;
	} else {
		memcpy(eseg->inline_hdr_start, skb_data, ihs);
		mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
	}

	eseg->inline_hdr_sz = cpu_to_be16(ihs);

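	/* Data-segment count: the ctrl + eth segments of the base WQE, plus
	 * however many extra 16-byte units the inline headers spilled past
	 * the built-in inline_hdr_start area.
	 */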
	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr_start),
			       MLX5_SEND_WQE_DS);
	dseg = (struct mlx5_wqe_data_seg *)cseg + ds_cnt;

	wi->num_dma = 0;

	headlen = skb_len - skb->data_len;
	if (headlen) {
		dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
					  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr = cpu_to_be64(dma_addr);
		dseg->lkey = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(headlen);

		mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
		wi->num_dma++;

		dseg++;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		int fsz = skb_frag_size(frag);

		dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
					    DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr = cpu_to_be64(dma_addr);
		dseg->lkey = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(fsz);

		mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
		wi->num_dma++;

		dseg++;
	}

	ds_cnt += wi->num_dma;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);

	sq->skb[pi] = skb;

	wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += wi->num_wqebbs;

	netdev_tx_sent_queue(sq->txq, wi->num_bytes);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM))) {
		netif_tx_stop_queue(sq->txq);
		sq->stats.stopped++;
	}

	sq->stats.xmit_more += skb->xmit_more;
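	/* Ring the doorbell only on the last skb of an xmit_more batch (or
	 * when the queue just stopped). When BlueFlame is enabled and this
	 * send is eligible, pass the WQE size so the whole WQE is written
	 * through the BlueFlame register rather than fetched by the HCA.
	 */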
	if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
		int bf_sz = 0;

		if (bf && test_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state))
			bf_sz = wi->num_wqebbs << 3;

		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
		mlx5e_tx_notify_hw(sq, &wqe->ctrl, bf_sz);
	}

	/* fill sq edge with nops to avoid wqe wrap around */
	while ((sq->pc & wq->sz_m1) > sq->edge)
		mlx5e_send_nop(sq, false);

	if (bf)
		sq->bf_budget--;

	sq->stats.packets++;
	sq->stats.bytes += num_bytes;
	return NETDEV_TX_OK;

dma_unmap_wqe_err:
	sq->stats.dropped++;
	mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);

	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}

netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_sq *sq = priv->txq_to_sq_map[skb_get_queue_mapping(skb)];

	return mlx5e_sq_xmit(sq, skb);
}

bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
	struct mlx5e_sq *sq;
	u32 dma_fifo_cc;
	u32 nbytes;
	u16 npkts;
	u16 sqcc;
	int i;

	sq = container_of(cq, struct mlx5e_sq, cq);

	if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state)))
		return false;

	npkts = 0;
	nbytes = 0;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	/* avoid dirtying sq cache line every cqe */
	dma_fifo_cc = sq->dma_fifo_cc;

	for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
		struct mlx5_cqe64 *cqe;
		u16 wqe_counter;
		bool last_wqe;

		cqe = mlx5e_get_cqe(cq);
		if (!cqe)
			break;

		mlx5_cqwq_pop(&cq->wq);

		wqe_counter = be16_to_cpu(cqe->wqe_counter);

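		/* A single CQE may complete a whole batch of WQEs; consume
		 * every entry up to and including the one the CQE points at.
		 */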
		do {
			struct mlx5e_tx_wqe_info *wi;
			struct sk_buff *skb;
			u16 ci;
			int j;

			last_wqe = (sqcc == wqe_counter);

			ci = sqcc & sq->wq.sz_m1;
			skb = sq->skb[ci];
			wi = &sq->wqe_info[ci];

			if (unlikely(!skb)) { /* nop */
				sqcc++;
				continue;
			}

			if (unlikely(skb_shinfo(skb)->tx_flags &
				     SKBTX_HW_TSTAMP)) {
				struct skb_shared_hwtstamps hwts = {};

				mlx5e_fill_hwstamp(sq->tstamp,
						   get_cqe_ts(cqe), &hwts);
				skb_tstamp_tx(skb, &hwts);
			}

			for (j = 0; j < wi->num_dma; j++) {
				struct mlx5e_sq_dma *dma =
					mlx5e_dma_get(sq, dma_fifo_cc++);

				mlx5e_tx_dma_unmap(sq->pdev, dma);
			}

			npkts++;
			nbytes += wi->num_bytes;
			sqcc += wi->num_wqebbs;
			napi_consume_skb(skb, napi_budget);
		} while (!last_wqe);
	}

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;

	netdev_tx_completed_queue(sq->txq, npkts, nbytes);

	if (netif_tx_queue_stopped(sq->txq) &&
	    mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM)) {
		netif_tx_wake_queue(sq->txq);
		sq->stats.wake++;
	}

	return (i == MLX5E_TX_CQ_POLL_BUDGET);
}

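/* Reclaim every descriptor still outstanding between cc and pc. Intended
 * for when no more completions are expected for this SQ (e.g. teardown
 * after an error or channel close), so skbs and their DMA mappings must
 * be released by hand.
 */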
void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
{
	struct mlx5e_tx_wqe_info *wi;
	struct sk_buff *skb;
	u16 ci;
	int i;

	while (sq->cc != sq->pc) {
		ci = sq->cc & sq->wq.sz_m1;
		skb = sq->skb[ci];
		wi = &sq->wqe_info[ci];

		if (!skb) { /* nop */
			sq->cc++;
			continue;
		}

		for (i = 0; i < wi->num_dma; i++) {
			struct mlx5e_sq_dma *dma =
				mlx5e_dma_get(sq, sq->dma_fifo_cc++);

			mlx5e_tx_dma_unmap(sq->pdev, dma);
		}

		dev_kfree_skb_any(skb);
		sq->cc += wi->num_wqebbs;
	}
}