/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include "en.h"

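/* SQ room bookkeeping: stop the txq when fewer than MLX5E_SQ_STOP_ROOM
 * WQEBBs remain, i.e. enough space for one maximal WQE plus the NOPs
 * that may be posted to pad up to the SQ edge.
 */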
#define MLX5E_SQ_NOPS_ROOM  MLX5_SEND_WQE_MAX_WQEBBS
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
			    MLX5E_SQ_NOPS_ROOM)

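/* Post a NOP WQE at the current producer position. Used to pad the SQ up
 * to its edge so a real WQE never wraps around; optionally rings the
 * doorbell so the NIC consumes it right away.
 */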
void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
{
        struct mlx5_wq_cyc *wq = &sq->wq;

        u16 pi = sq->pc & wq->sz_m1;
        struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);

        struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

        memset(cseg, 0, sizeof(*cseg));

        cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
        cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | 0x01);

        sq->skb[pi] = NULL;
        sq->pc++;

        if (notify_hw) {
                cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
                mlx5e_tx_notify_hw(sq, wqe, 0);
        }
}

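/* Release one DMA mapping recorded in the SQ DMA fifo, according to how it
 * was mapped (single buffer vs. page fragment).
 */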
static inline void mlx5e_tx_dma_unmap(struct device *pdev,
                                      struct mlx5e_sq_dma *dma)
{
        switch (dma->type) {
        case MLX5E_DMA_MAP_SINGLE:
                dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
                break;
        case MLX5E_DMA_MAP_PAGE:
                dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
                break;
        default:
                WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
        }
}

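/* The per-SQ DMA fifo records mappings in transmit order so that the
 * completion path (and the error unwind) can unmap them without touching
 * the WQE contents again.
 */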
static inline void mlx5e_dma_push(struct mlx5e_sq *sq,
                                  dma_addr_t addr,
                                  u32 size,
                                  enum mlx5e_dma_map_type map_type)
{
        sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr;
        sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size;
        sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].type = map_type;
        sq->dma_fifo_pc++;
}

static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_sq *sq, u32 i)
{
        return &sq->dma_fifo[i & sq->dma_fifo_mask];
}

static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, u8 num_dma)
{
        int i;

        for (i = 0; i < num_dma; i++) {
                struct mlx5e_sq_dma *last_pushed_dma =
                        mlx5e_dma_get(sq, --sq->dma_fifo_pc);

                mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
        }
}

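/* Pick the TX queue for the skb: the fallback hash selects the channel,
 * and the VLAN PCP bits select the traffic class when multiple TCs are
 * configured.
 */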
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
                       void *accel_priv, select_queue_fallback_t fallback)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        int channel_ix = fallback(dev, skb);
        int up = (netdev_get_num_tc(dev) && skb_vlan_tag_present(skb)) ?
                 skb->vlan_tci >> VLAN_PRIO_SHIFT : 0;

        return priv->channeltc_to_txq_map[channel_ix][up];
}

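/* Decide how many packet bytes to copy (inline) into the WQE. For a
 * BlueFlame candidate (linear skb, no doorbell batching) try to inline the
 * whole linear part, the idea being that the packet then rides entirely
 * inside the WQE.
 */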
static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
                                            struct sk_buff *skb, bool bf)
{
        /* Some NIC TX decisions, e.g loopback, are based on the packet
         * headers and occur before the data gather.
         * Therefore these headers must be copied into the WQE
         */
#define MLX5E_MIN_INLINE ETH_HLEN

        if (bf) {
                u16 ihs = skb_headlen(skb);

                if (skb_vlan_tag_present(skb))
                        ihs += VLAN_HLEN;

                if (ihs <= sq->max_inline)
                        return skb_headlen(skb);
        }

        return MLX5E_MIN_INLINE;
}

static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
                                            unsigned int *skb_len,
                                            unsigned int len)
{
        *skb_len -= len;
        *skb_data += len;
}

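/* Copy the Ethernet header into the WQE while inserting an 802.1Q tag:
 * MAC addresses first, then the VLAN ethertype/TCI, then the rest of the
 * inlined headers.
 */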
static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs,
                                     unsigned char **skb_data,
                                     unsigned int *skb_len)
{
        struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
        int cpy1_sz = 2 * ETH_ALEN;
        int cpy2_sz = ihs - cpy1_sz;

        memcpy(vhdr, *skb_data, cpy1_sz);
        mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy1_sz);
        vhdr->h_vlan_proto = skb->vlan_proto;
        vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
        memcpy(&vhdr->h_vlan_encapsulated_proto, *skb_data, cpy2_sz);
        mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy2_sz);
}

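/* Build and post a single send WQE for the skb: fill the ctrl segment, the
 * eth segment (checksum/LSO flags plus inlined headers) and one data
 * segment per DMA-mapped piece, then ring the doorbell if needed.
 */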
static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
{
        struct mlx5_wq_cyc *wq = &sq->wq;

        u16 pi = sq->pc & wq->sz_m1;
        struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
        struct mlx5e_tx_wqe_info *wi = &sq->wqe_info[pi];

        struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
        struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
        struct mlx5_wqe_data_seg *dseg;

        unsigned char *skb_data = skb->data;
        unsigned int skb_len = skb->len;
        u8 opcode = MLX5_OPCODE_SEND;
        dma_addr_t dma_addr = 0;
        unsigned int num_bytes;
        bool bf = false;
        u16 headlen;
        u16 ds_cnt;
        u16 ihs;
        int i;

        memset(wqe, 0, sizeof(*wqe));

        if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
                eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
                if (skb->encapsulation) {
                        eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
                                          MLX5_ETH_WQE_L4_INNER_CSUM;
                        sq->stats.csum_offload_inner++;
                } else {
                        eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
                }
        } else
                sq->stats.csum_offload_none++;

        if (sq->cc != sq->prev_cc) {
                sq->prev_cc = sq->cc;
                sq->bf_budget = (sq->cc == sq->pc) ? MLX5E_SQ_BF_BUDGET : 0;
        }

        if (skb_is_gso(skb)) {
                eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
                opcode = MLX5_OPCODE_LSO;

                if (skb->encapsulation) {
                        ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
                        sq->stats.tso_inner_packets++;
                        sq->stats.tso_inner_bytes += skb->len - ihs;
                } else {
                        ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
                        sq->stats.tso_packets++;
                        sq->stats.tso_bytes += skb->len - ihs;
                }

                num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
        } else {
                bf = sq->bf_budget &&
                     !skb->xmit_more &&
                     !skb_shinfo(skb)->nr_frags;
                ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
                num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
        }

        wi->num_bytes = num_bytes;

        if (skb_vlan_tag_present(skb)) {
                mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs, &skb_data,
                                  &skb_len);
                ihs += VLAN_HLEN;
        } else {
                memcpy(eseg->inline_hdr_start, skb_data, ihs);
                mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
        }

        eseg->inline_hdr_sz = cpu_to_be16(ihs);

        ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
        ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr_start),
                               MLX5_SEND_WQE_DS);
        dseg = (struct mlx5_wqe_data_seg *)cseg + ds_cnt;

        wi->num_dma = 0;

        headlen = skb_len - skb->data_len;
        if (headlen) {
                dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
                                          DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
                        goto dma_unmap_wqe_err;

                dseg->addr = cpu_to_be64(dma_addr);
                dseg->lkey = sq->mkey_be;
                dseg->byte_count = cpu_to_be32(headlen);

                mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
                wi->num_dma++;

                dseg++;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
                int fsz = skb_frag_size(frag);

                dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
                                            DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
                        goto dma_unmap_wqe_err;

                dseg->addr = cpu_to_be64(dma_addr);
                dseg->lkey = sq->mkey_be;
                dseg->byte_count = cpu_to_be32(fsz);

                mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
                wi->num_dma++;

                dseg++;
        }

        ds_cnt += wi->num_dma;

        cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
        cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);

        sq->skb[pi] = skb;

        wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
        sq->pc += wi->num_wqebbs;

        netdev_tx_sent_queue(sq->txq, wi->num_bytes);

        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

        if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM))) {
                netif_tx_stop_queue(sq->txq);
                sq->stats.stopped++;
        }

        if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
                int bf_sz = 0;

                if (bf && test_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state))
                        bf_sz = wi->num_wqebbs << 3;

                cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
                mlx5e_tx_notify_hw(sq, wqe, bf_sz);
        }

        /* fill sq edge with nops to avoid wqe wrap around */
        while ((sq->pc & wq->sz_m1) > sq->edge)
                mlx5e_send_nop(sq, false);

        sq->bf_budget = bf ? sq->bf_budget - 1 : 0;

        sq->stats.packets++;
        sq->stats.bytes += num_bytes;
        return NETDEV_TX_OK;

dma_unmap_wqe_err:
        sq->stats.dropped++;
        mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);

        dev_kfree_skb_any(skb);

        return NETDEV_TX_OK;
}

netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_sq *sq = priv->txq_to_sq_map[skb_get_queue_mapping(skb)];

        return mlx5e_sq_xmit(sq, skb);
}

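/* Poll the TX CQ: for every completed WQE unmap its DMA fifo entries and
 * free the skb, then update the BQL counters and wake the queue if it was
 * stopped and enough room has been freed. Returns true when the full
 * budget was consumed, so the caller keeps polling.
 */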
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
        struct mlx5e_sq *sq;
        u32 dma_fifo_cc;
        u32 nbytes;
        u16 npkts;
        u16 sqcc;
        int i;

        sq = container_of(cq, struct mlx5e_sq, cq);

        npkts = 0;
        nbytes = 0;

        /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
         * otherwise a cq overrun may occur
         */
        sqcc = sq->cc;

        /* avoid dirtying sq cache line every cqe */
        dma_fifo_cc = sq->dma_fifo_cc;

        for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
                struct mlx5_cqe64 *cqe;
                u16 wqe_counter;
                bool last_wqe;

                cqe = mlx5e_get_cqe(cq);
                if (!cqe)
                        break;

                mlx5_cqwq_pop(&cq->wq);

                wqe_counter = be16_to_cpu(cqe->wqe_counter);

                do {
                        struct mlx5e_tx_wqe_info *wi;
                        struct sk_buff *skb;
                        u16 ci;
                        int j;

                        last_wqe = (sqcc == wqe_counter);

                        ci = sqcc & sq->wq.sz_m1;
                        skb = sq->skb[ci];
                        wi = &sq->wqe_info[ci];

                        if (unlikely(!skb)) { /* nop */
                                sq->stats.nop++;
                                sqcc++;
                                continue;
                        }

                        if (unlikely(skb_shinfo(skb)->tx_flags &
                                     SKBTX_HW_TSTAMP)) {
                                struct skb_shared_hwtstamps hwts = {};

                                mlx5e_fill_hwstamp(sq->tstamp,
                                                   get_cqe_ts(cqe), &hwts);
                                skb_tstamp_tx(skb, &hwts);
                        }

                        for (j = 0; j < wi->num_dma; j++) {
                                struct mlx5e_sq_dma *dma =
                                        mlx5e_dma_get(sq, dma_fifo_cc++);

                                mlx5e_tx_dma_unmap(sq->pdev, dma);
                        }

                        npkts++;
                        nbytes += wi->num_bytes;
                        sqcc += wi->num_wqebbs;
                        napi_consume_skb(skb, napi_budget);
                } while (!last_wqe);
        }

        mlx5_cqwq_update_db_record(&cq->wq);

        /* ensure cq space is freed before enabling more cqes */
        wmb();

        sq->dma_fifo_cc = dma_fifo_cc;
        sq->cc = sqcc;

        netdev_tx_completed_queue(sq->txq, npkts, nbytes);

        if (netif_tx_queue_stopped(sq->txq) &&
            mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM) &&
            likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) {
                netif_tx_wake_queue(sq->txq);
                sq->stats.wake++;
        }

        return (i == MLX5E_TX_CQ_POLL_BUDGET);
}