#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <net/ip6_checksum.h>
#include <net/page_pool.h>
#include <net/inet_ecn.h>
#include "en.h"
#include "en/txrx.h"
#include "en_tc.h"
#include "eswitch.h"
#include "en_rep.h"
#include "en/rep/tc.h"
#include "ipoib/ipoib.h"
#include "accel/ipsec.h"
#include "fpga/ipsec.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/tls_rxtx.h"
#include "lib/clock.h"
#include "en/xdp.h"
#include "en/xsk/rx.h"
#include "en/health.h"
#include "en/params.h"
#include "en/txrx.h"
56
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
				u16 cqe_bcnt, u32 head_offset, u32 page_idx);
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
				   u16 cqe_bcnt, u32 head_offset, u32 page_idx);
static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);

const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic = {
	.handle_rx_cqe       = mlx5e_handle_rx_cqe,
	.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
};
70
71static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
72{
73 return config->rx_filter == HWTSTAMP_FILTER_ALL;
74}
75
76static inline void mlx5e_read_cqe_slot(struct mlx5_cqwq *wq,
77 u32 cqcc, void *data)
78{
79 u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc);
80
81 memcpy(data, mlx5_cqwq_get_wqe(wq, ci), sizeof(struct mlx5_cqe64));
82}
83
84static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
85 struct mlx5_cqwq *wq,
86 u32 cqcc)
87{
88 struct mlx5e_cq_decomp *cqd = &rq->cqd;
89 struct mlx5_cqe64 *title = &cqd->title;
90
91 mlx5e_read_cqe_slot(wq, cqcc, title);
92 cqd->left = be32_to_cpu(title->byte_cnt);
93 cqd->wqe_counter = be16_to_cpu(title->wqe_counter);
94 rq->stats->cqe_compress_blks++;
95}
96
97static inline void mlx5e_read_mini_arr_slot(struct mlx5_cqwq *wq,
98 struct mlx5e_cq_decomp *cqd,
99 u32 cqcc)
100{
101 mlx5e_read_cqe_slot(wq, cqcc, cqd->mini_arr);
102 cqd->mini_arr_idx = 0;
103}
104
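/* After a compressed session is expanded, fix the ownership bits of the n
 * CQE slots that were consumed so the consumer counter and the hardware stay
 * in sync; the slots may wrap past the end of the CQ ring, in which case the
 * ownership bit is inverted for the wrapped part.
 */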
105static inline void mlx5e_cqes_update_owner(struct mlx5_cqwq *wq, int n)
106{
107 u32 cqcc = wq->cc;
108 u8 op_own = mlx5_cqwq_get_ctr_wrap_cnt(wq, cqcc) & 1;
109 u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc);
110 u32 wq_sz = mlx5_cqwq_get_size(wq);
111 u32 ci_top = min_t(u32, wq_sz, ci + n);
112
113 for (; ci < ci_top; ci++, n--) {
114 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
115
116 cqe->op_own = op_own;
117 }
118
119 if (unlikely(ci == wq_sz)) {
120 op_own = !op_own;
121 for (ci = 0; ci < n; ci++) {
122 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
123
124 cqe->op_own = op_own;
125 }
126 }
127}
128
129static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
130 struct mlx5_cqwq *wq,
131 u32 cqcc)
132{
133 struct mlx5e_cq_decomp *cqd = &rq->cqd;
134 struct mlx5_mini_cqe8 *mini_cqe = &cqd->mini_arr[cqd->mini_arr_idx];
135 struct mlx5_cqe64 *title = &cqd->title;
136
137 title->byte_cnt = mini_cqe->byte_cnt;
138 title->check_sum = mini_cqe->checksum;
139 title->op_own &= 0xf0;
140 title->op_own |= 0x01 & (cqcc >> wq->fbc.log_sz);

	/* The mini CQE provides only the byte count, checksum and stride/WQE
	 * counter; all other fields are inherited from the title CQE. The
	 * ownership bit is refreshed from the wrap count at this position.
	 */
145 if (test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)) {
146 title->wqe_counter = mini_cqe->stridx;
147 return;
148 }
149
	/* No HW stride index available: derive the WQE counter in software. */
	title->wqe_counter = cpu_to_be16(cqd->wqe_counter);
152 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
153 cqd->wqe_counter += mpwrq_get_cqe_consumed_strides(title);
154 else
155 cqd->wqe_counter =
156 mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cqd->wqe_counter + 1);
157}
158
159static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
160 struct mlx5_cqwq *wq,
161 u32 cqcc)
162{
163 struct mlx5e_cq_decomp *cqd = &rq->cqd;
164
165 mlx5e_decompress_cqe(rq, wq, cqcc);
166 cqd->title.rss_hash_type = 0;
167 cqd->title.rss_hash_result = 0;
168}
169
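/* CQE compression: the hardware replaces a burst of full CQEs with one
 * "title" CQE followed by arrays of 8-byte mini CQEs. Decompression expands
 * each mini CQE into the title template and feeds it to the regular CQE
 * handler, stopping early when the NAPI budget runs out (cqd->left keeps the
 * remainder for the next poll).
 */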
170static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
171 struct mlx5_cqwq *wq,
172 int update_owner_only,
173 int budget_rem)
174{
175 struct mlx5e_cq_decomp *cqd = &rq->cqd;
176 u32 cqcc = wq->cc + update_owner_only;
177 u32 cqe_count;
178 u32 i;
179
180 cqe_count = min_t(u32, cqd->left, budget_rem);
181
182 for (i = update_owner_only; i < cqe_count;
183 i++, cqd->mini_arr_idx++, cqcc++) {
184 if (cqd->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE)
185 mlx5e_read_mini_arr_slot(wq, cqd, cqcc);
186
187 mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
188 INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
189 mlx5e_handle_rx_cqe, rq, &cqd->title);
190 }
191 mlx5e_cqes_update_owner(wq, cqcc - wq->cc);
192 wq->cc = cqcc;
193 cqd->left -= cqe_count;
194 rq->stats->cqe_compress_pkts += cqe_count;
195
196 return cqe_count;
197}
198
199static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
200 struct mlx5_cqwq *wq,
201 int budget_rem)
202{
203 struct mlx5e_cq_decomp *cqd = &rq->cqd;
204 u32 cc = wq->cc;
205
206 mlx5e_read_title_slot(rq, wq, cc);
207 mlx5e_read_mini_arr_slot(wq, cqd, cc + 1);
208 mlx5e_decompress_cqe(rq, wq, cc);
209 INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
210 mlx5e_handle_rx_cqe, rq, &cqd->title);
211 cqd->mini_arr_idx++;
212
213 return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1;
214}
215
216static inline bool mlx5e_page_is_reserved(struct page *page)
217{
218 return page_is_pfmemalloc(page) || page_to_nid(page) != numa_mem_id();
219}
220
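/* Per-RQ page cache: a small power-of-two ring of recently used pages that
 * are kept DMA-mapped. A page is admitted only if it is not pfmemalloc and
 * sits on the local NUMA node, and it is handed out again only while its
 * refcount is 1, i.e. nobody else still references it.
 */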
221static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
222 struct mlx5e_dma_info *dma_info)
223{
224 struct mlx5e_page_cache *cache = &rq->page_cache;
225 u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1);
226 struct mlx5e_rq_stats *stats = rq->stats;
227
228 if (tail_next == cache->head) {
229 stats->cache_full++;
230 return false;
231 }
232
233 if (unlikely(mlx5e_page_is_reserved(dma_info->page))) {
234 stats->cache_waive++;
235 return false;
236 }
237
238 cache->page_cache[cache->tail] = *dma_info;
239 cache->tail = tail_next;
240 return true;
241}
242
243static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
244 struct mlx5e_dma_info *dma_info)
245{
246 struct mlx5e_page_cache *cache = &rq->page_cache;
247 struct mlx5e_rq_stats *stats = rq->stats;
248
249 if (unlikely(cache->head == cache->tail)) {
250 stats->cache_empty++;
251 return false;
252 }
253
254 if (page_ref_count(cache->page_cache[cache->head].page) != 1) {
255 stats->cache_busy++;
256 return false;
257 }
258
259 *dma_info = cache->page_cache[cache->head];
260 cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1);
261 stats->cache_reuse++;
262
263 dma_sync_single_for_device(rq->pdev, dma_info->addr,
264 PAGE_SIZE,
265 DMA_FROM_DEVICE);
266 return true;
267}
268
269static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq,
270 struct mlx5e_dma_info *dma_info)
271{
272 if (mlx5e_rx_cache_get(rq, dma_info))
273 return 0;
274
275 dma_info->page = page_pool_dev_alloc_pages(rq->page_pool);
276 if (unlikely(!dma_info->page))
277 return -ENOMEM;
278
279 dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0,
280 PAGE_SIZE, rq->buff.map_dir);
281 if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
282 page_pool_recycle_direct(rq->page_pool, dma_info->page);
283 dma_info->page = NULL;
284 return -ENOMEM;
285 }
286
287 return 0;
288}
289
290static inline int mlx5e_page_alloc(struct mlx5e_rq *rq,
291 struct mlx5e_dma_info *dma_info)
292{
293 if (rq->xsk_pool)
294 return mlx5e_xsk_page_alloc_pool(rq, dma_info);
295 else
296 return mlx5e_page_alloc_pool(rq, dma_info);
297}
298
299void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info)
300{
301 dma_unmap_page(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir);
302}
303
304void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
305 struct mlx5e_dma_info *dma_info,
306 bool recycle)
307{
308 if (likely(recycle)) {
309 if (mlx5e_rx_cache_put(rq, dma_info))
310 return;
311
312 mlx5e_page_dma_unmap(rq, dma_info);
313 page_pool_recycle_direct(rq->page_pool, dma_info->page);
314 } else {
315 mlx5e_page_dma_unmap(rq, dma_info);
316 page_pool_release_page(rq->page_pool, dma_info->page);
317 put_page(dma_info->page);
318 }
319}
320
321static inline void mlx5e_page_release(struct mlx5e_rq *rq,
322 struct mlx5e_dma_info *dma_info,
323 bool recycle)
324{
325 if (rq->xsk_pool)
		/* The "recycle" flag is ignored for XSK buffers: the frame is
		 * always returned to the XSK reuse ring, since it cannot be
		 * handed back to user space once the interface goes down.
		 */
		xsk_buff_free(dma_info->xsk);
331 else
332 mlx5e_page_release_dynamic(rq, dma_info, recycle);
333}
334
335static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
336 struct mlx5e_wqe_frag_info *frag)
337{
338 int err = 0;
339
	if (!frag->offset)
		/* On first frag (offset == 0), replenish page (dma_info
		 * actually). Other frags that point to the same dma_info
		 * (with a different offset) should just use the new one
		 * without replenishing again by themselves.
		 */
		err = mlx5e_page_alloc(rq, frag->di);
347
348 return err;
349}
350
351static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
352 struct mlx5e_wqe_frag_info *frag,
353 bool recycle)
354{
355 if (frag->last_in_page)
356 mlx5e_page_release(rq, frag->di, recycle);
357}
358
359static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
360{
361 return &rq->wqe.frags[ix << rq->wqe.info.log_num_frags];
362}
363
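/* Populate one legacy (cyclic) RX WQE: allocate (or share) a page for each
 * fragment and write the DMA addresses, offset by the headroom, into the
 * WQE's scatter entries. On failure, release the fragments allocated so far.
 */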
364static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
365 u16 ix)
366{
367 struct mlx5e_wqe_frag_info *frag = get_frag(rq, ix);
368 int err;
369 int i;
370
371 for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) {
372 err = mlx5e_get_rx_frag(rq, frag);
373 if (unlikely(err))
374 goto free_frags;
375
376 wqe->data[i].addr = cpu_to_be64(frag->di->addr +
377 frag->offset + rq->buff.headroom);
378 }
379
380 return 0;
381
382free_frags:
383 while (--i >= 0)
384 mlx5e_put_rx_frag(rq, --frag, true);
385
386 return err;
387}
388
389static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
390 struct mlx5e_wqe_frag_info *wi,
391 bool recycle)
392{
393 int i;
394
395 for (i = 0; i < rq->wqe.info.num_frags; i++, wi++)
396 mlx5e_put_rx_frag(rq, wi, recycle);
397}
398
399static void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
400{
401 struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix);
402
403 mlx5e_free_rx_wqe(rq, wi, false);
404}
405
406static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk)
407{
408 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
409 int err;
410 int i;
411
412 if (rq->xsk_pool) {
413 int pages_desired = wqe_bulk << rq->wqe.info.log_num_frags;

		/* Check up front that enough XSK frames are available,
		 * instead of allocating one by one, failing, and moving
		 * frames to the reuse ring.
		 */
419 if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, pages_desired)))
420 return -ENOMEM;
421 }
422
423 for (i = 0; i < wqe_bulk; i++) {
424 struct mlx5e_rx_wqe_cyc *wqe = mlx5_wq_cyc_get_wqe(wq, ix + i);
425
426 err = mlx5e_alloc_rx_wqe(rq, wqe, ix + i);
427 if (unlikely(err))
428 goto free_wqes;
429 }
430
431 return 0;
432
433free_wqes:
434 while (--i >= 0)
435 mlx5e_dealloc_rx_wqe(rq, ix + i);
436
437 return err;
438}
439
440static inline void
441mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
442 struct mlx5e_dma_info *di, u32 frag_offset, u32 len,
443 unsigned int truesize)
444{
445 dma_sync_single_for_cpu(rq->pdev,
446 di->addr + frag_offset,
447 len, DMA_FROM_DEVICE);
448 page_ref_inc(di->page);
449 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
450 di->page, frag_offset, len, truesize);
451}
452
453static inline void
454mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb,
455 struct mlx5e_dma_info *dma_info,
456 int offset_from, u32 headlen)
457{
458 const void *from = page_address(dma_info->page) + offset_from;
	/* Aligning len to sizeof(long) optimizes memcpy performance */
	unsigned int len = ALIGN(headlen, sizeof(long));
461
462 dma_sync_single_for_cpu(pdev, dma_info->addr + offset_from, len,
463 DMA_FROM_DEVICE);
464 skb_copy_to_linear_data(skb, from, len);
465}
466
467static void
468mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, bool recycle)
469{
470 bool no_xdp_xmit;
471 struct mlx5e_dma_info *dma_info = wi->umr.dma_info;
472 int i;
473
	/* A common case for AF_XDP: every page was handed to XDP_TX. */
	if (bitmap_full(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE))
476 return;
477
478 no_xdp_xmit = bitmap_empty(wi->xdp_xmit_bitmap,
479 MLX5_MPWRQ_PAGES_PER_WQE);
480
481 for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++)
482 if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
483 mlx5e_page_release(rq, &dma_info[i], recycle);
484}
485
486static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)
487{
488 struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
489
490 do {
491 u16 next_wqe_index = mlx5_wq_ll_get_wqe_next_ix(wq, wq->head);
492
493 mlx5_wq_ll_push(wq, next_wqe_index);
494 } while (--n);
495
	/* Ensure WQEs are visible to the device before updating the doorbell record. */
	dma_wmb();
498
499 mlx5_wq_ll_update_db_record(wq);
500}
501
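/* Allocate the pages backing one multi-packet WQE and post a UMR WQE on the
 * ICOSQ that maps them into the RQ's MTT. The RX WQE itself only becomes
 * visible to hardware later, in mlx5e_post_rx_mpwqe(), once the UMR
 * completion arrives.
 */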
502static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
503{
504 struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
505 struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0];
506 struct mlx5e_icosq *sq = &rq->channel->icosq;
507 struct mlx5_wq_cyc *wq = &sq->wq;
508 struct mlx5e_umr_wqe *umr_wqe;
509 u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1);
510 u16 pi;
511 int err;
512 int i;
513
	/* Check up front that enough XSK frames are available, instead of
	 * allocating one by one, failing, and moving frames to the reuse
	 * ring.
	 */
517 if (rq->xsk_pool &&
518 unlikely(!xsk_buff_can_alloc(rq->xsk_pool, MLX5_MPWRQ_PAGES_PER_WQE))) {
519 err = -ENOMEM;
520 goto err;
521 }
522
523 pi = mlx5e_icosq_get_next_pi(sq, MLX5E_UMR_WQEBBS);
524 umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
525 memcpy(umr_wqe, &rq->mpwqe.umr_wqe, offsetof(struct mlx5e_umr_wqe, inline_mtts));
526
527 for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) {
528 err = mlx5e_page_alloc(rq, dma_info);
529 if (unlikely(err))
530 goto err_unmap;
531 umr_wqe->inline_mtts[i].ptag = cpu_to_be64(dma_info->addr | MLX5_EN_WR);
532 }
533
534 bitmap_zero(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
535 wi->consumed_strides = 0;
536
537 umr_wqe->ctrl.opmod_idx_opcode =
538 cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
539 MLX5_OPCODE_UMR);
540 umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset);
541
542 sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
543 .wqe_type = MLX5E_ICOSQ_WQE_UMR_RX,
544 .num_wqebbs = MLX5E_UMR_WQEBBS,
545 .umr.rq = rq,
546 };
547
548 sq->pc += MLX5E_UMR_WQEBBS;
549
550 sq->doorbell_cseg = &umr_wqe->ctrl;
551
552 return 0;
553
554err_unmap:
555 while (--i >= 0) {
556 dma_info--;
557 mlx5e_page_release(rq, dma_info, true);
558 }
559
560err:
561 rq->stats->buff_alloc_err++;
562
563 return err;
564}
565
566static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
567{
568 struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
569
570 mlx5e_free_rx_mpwqe(rq, wi, false);
571}
572
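/* NAPI-time refill of the legacy (cyclic) RQ: allocate receive WQEs in bulks
 * of rq->wqe.info.wqe_bulk and update the doorbell record once at the end.
 * Returns true if an allocation failed, so the caller can retry later.
 */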
573INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
574{
575 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
576 u8 wqe_bulk;
577 int err;
578
579 if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
580 return false;
581
582 wqe_bulk = rq->wqe.info.wqe_bulk;
583
584 if (mlx5_wq_cyc_missing(wq) < wqe_bulk)
585 return false;
586
587 do {
588 u16 head = mlx5_wq_cyc_get_head(wq);
589
590 err = mlx5e_alloc_rx_wqes(rq, head, wqe_bulk);
591 if (unlikely(err)) {
592 rq->stats->buff_alloc_err++;
593 break;
594 }
595
596 mlx5_wq_cyc_push_n(wq, wqe_bulk);
597 } while (mlx5_wq_cyc_missing(wq) >= wqe_bulk);
598
	/* Ensure WQEs are visible to the device before updating the doorbell record. */
	dma_wmb();
601
602 mlx5_wq_cyc_update_db_record(wq);
603
604 return !!err;
605}
606
607void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq)
608{
609 u16 sqcc;
610
611 sqcc = sq->cc;
612
613 while (sqcc != sq->pc) {
614 struct mlx5e_icosq_wqe_info *wi;
615 u16 ci;
616
617 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
618 wi = &sq->db.wqe_info[ci];
619 sqcc += wi->num_wqebbs;
620#ifdef CONFIG_MLX5_EN_TLS
621 switch (wi->wqe_type) {
622 case MLX5E_ICOSQ_WQE_SET_PSV_TLS:
623 mlx5e_ktls_handle_ctx_completion(wi);
624 break;
625 case MLX5E_ICOSQ_WQE_GET_PSV_TLS:
626 mlx5e_ktls_handle_get_psv_completion(wi, sq);
627 break;
628 }
629#endif
630 }
631 sq->cc = sqcc;
632}
633
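/* Poll the ICOSQ completion queue: advance the consumer counter over the
 * completed UMR/NOP/TLS WQEs, count completed RX UMRs for the owning RQ, and
 * trigger recovery on error CQEs.
 */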
634int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
635{
636 struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
637 struct mlx5_cqe64 *cqe;
638 u16 sqcc;
639 int i;
640
641 if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
642 return 0;
643
644 cqe = mlx5_cqwq_get_cqe(&cq->wq);
645 if (likely(!cqe))
646 return 0;
647
	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur.
	 */
	sqcc = sq->cc;
652
653 i = 0;
654 do {
655 u16 wqe_counter;
656 bool last_wqe;
657
658 mlx5_cqwq_pop(&cq->wq);
659
660 wqe_counter = be16_to_cpu(cqe->wqe_counter);
661
662 do {
663 struct mlx5e_icosq_wqe_info *wi;
664 u16 ci;
665
666 last_wqe = (sqcc == wqe_counter);
667
668 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
669 wi = &sq->db.wqe_info[ci];
670 sqcc += wi->num_wqebbs;
671
672 if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
673 netdev_WARN_ONCE(cq->channel->netdev,
674 "Bad OP in ICOSQ CQE: 0x%x\n",
675 get_cqe_opcode(cqe));
676 mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
677 (struct mlx5_err_cqe *)cqe);
678 if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
679 queue_work(cq->channel->priv->wq, &sq->recover_work);
680 break;
681 }
682
683 switch (wi->wqe_type) {
684 case MLX5E_ICOSQ_WQE_UMR_RX:
685 wi->umr.rq->mpwqe.umr_completed++;
686 break;
687 case MLX5E_ICOSQ_WQE_NOP:
688 break;
689#ifdef CONFIG_MLX5_EN_TLS
690 case MLX5E_ICOSQ_WQE_UMR_TLS:
691 break;
692 case MLX5E_ICOSQ_WQE_SET_PSV_TLS:
693 mlx5e_ktls_handle_ctx_completion(wi);
694 break;
695 case MLX5E_ICOSQ_WQE_GET_PSV_TLS:
696 mlx5e_ktls_handle_get_psv_completion(wi, sq);
697 break;
698#endif
699 default:
700 netdev_WARN_ONCE(cq->channel->netdev,
701 "Bad WQE type in ICOSQ WQE info: 0x%x\n",
702 wi->wqe_type);
703 }
704 } while (!last_wqe);
705 } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
706
707 sq->cc = sqcc;
708
709 mlx5_cqwq_update_db_record(&cq->wq);
710
711 return i;
712}
713
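/* NAPI-time refill of the striding RQ: first publish WQEs whose UMRs have
 * completed, then issue new UMR WQEs (at least UMR_WQE_BULK at a time) for
 * the missing entries. Returns true only in the XSK out-of-frames case, to
 * ask the caller to poll again or wake the application.
 */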
714INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
715{
716 struct mlx5e_icosq *sq = &rq->channel->icosq;
717 struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
718 u8 umr_completed = rq->mpwqe.umr_completed;
719 int alloc_err = 0;
720 u8 missing, i;
721 u16 head;
722
723 if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
724 return false;
725
726 if (umr_completed) {
727 mlx5e_post_rx_mpwqe(rq, umr_completed);
728 rq->mpwqe.umr_in_progress -= umr_completed;
729 rq->mpwqe.umr_completed = 0;
730 }
731
732 missing = mlx5_wq_ll_missing(wq) - rq->mpwqe.umr_in_progress;
733
734 if (unlikely(rq->mpwqe.umr_in_progress > rq->mpwqe.umr_last_bulk))
735 rq->stats->congst_umr++;
736
737#define UMR_WQE_BULK (2)
738 if (likely(missing < UMR_WQE_BULK))
739 return false;
740
741 head = rq->mpwqe.actual_wq_head;
742 i = missing;
743 do {
744 alloc_err = mlx5e_alloc_rx_mpwqe(rq, head);
745
746 if (unlikely(alloc_err))
747 break;
748 head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
749 } while (--i);
750
751 rq->mpwqe.umr_last_bulk = missing - i;
752 if (sq->doorbell_cseg) {
753 mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg);
754 sq->doorbell_cseg = NULL;
755 }
756
757 rq->mpwqe.umr_in_progress += rq->mpwqe.umr_last_bulk;
758 rq->mpwqe.actual_wq_head = head;
759
	/* If the XSK Fill Ring doesn't have enough frames, report the error,
	 * so that one of two actions can be performed:
	 * 1. If need_wakeup is used, signal that the application has to kick
	 *    the driver when it refills the Fill Ring.
	 * 2. Otherwise, busy poll by rescheduling the NAPI poll.
	 */
766 if (unlikely(alloc_err == -ENOMEM && rq->xsk_pool))
767 return true;
768
769 return false;
770}
771
772static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
773{
774 u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
775 u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
776 (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
777
778 tcp->check = 0;
779 tcp->psh = get_cqe_lro_tcppsh(cqe);
780
781 if (tcp_ack) {
782 tcp->ack = 1;
783 tcp->ack_seq = cqe->lro_ack_seq_num;
784 tcp->window = cqe->lro_tcp_win;
785 }
786}
787
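/* An LRO session delivers one merged SKB; rebuild the headers so the stack
 * sees a consistent packet: patch TTL/hop limit and total/payload length,
 * then recompute the IPv4 header checksum and the TCP pseudo-header checksum
 * from the CQE-provided partial checksum.
 */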
788static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
789 u32 cqe_bcnt)
790{
791 struct ethhdr *eth = (struct ethhdr *)(skb->data);
792 struct tcphdr *tcp;
793 int network_depth = 0;
794 __wsum check;
795 __be16 proto;
796 u16 tot_len;
797 void *ip_p;
798
799 proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
800
801 tot_len = cqe_bcnt - network_depth;
802 ip_p = skb->data + network_depth;
803
804 if (proto == htons(ETH_P_IP)) {
805 struct iphdr *ipv4 = ip_p;
806
807 tcp = ip_p + sizeof(struct iphdr);
808 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
809
810 ipv4->ttl = cqe->lro_min_ttl;
811 ipv4->tot_len = cpu_to_be16(tot_len);
812 ipv4->check = 0;
813 ipv4->check = ip_fast_csum((unsigned char *)ipv4,
814 ipv4->ihl);
815
816 mlx5e_lro_update_tcp_hdr(cqe, tcp);
817 check = csum_partial(tcp, tcp->doff * 4,
818 csum_unfold((__force __sum16)cqe->check_sum));
819
820 tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr,
821 tot_len - sizeof(struct iphdr),
822 IPPROTO_TCP, check);
823 } else {
824 u16 payload_len = tot_len - sizeof(struct ipv6hdr);
825 struct ipv6hdr *ipv6 = ip_p;
826
827 tcp = ip_p + sizeof(struct ipv6hdr);
828 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
829
830 ipv6->hop_limit = cqe->lro_min_ttl;
831 ipv6->payload_len = cpu_to_be16(payload_len);
832
833 mlx5e_lro_update_tcp_hdr(cqe, tcp);
834 check = csum_partial(tcp, tcp->doff * 4,
835 csum_unfold((__force __sum16)cqe->check_sum));
836
837 tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len,
838 IPPROTO_TCP, check);
839 }
840}
841
842static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
843 struct sk_buff *skb)
844{
845 u8 cht = cqe->rss_hash_type;
846 int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 :
847 (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 :
848 PKT_HASH_TYPE_NONE;
849 skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
850}
851
852static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth,
853 __be16 *proto)
854{
855 *proto = ((struct ethhdr *)skb->data)->h_proto;
856 *proto = __vlan_get_protocol(skb, *proto, network_depth);
857
858 if (*proto == htons(ETH_P_IP))
859 return pskb_may_pull(skb, *network_depth + sizeof(struct iphdr));
860
861 if (*proto == htons(ETH_P_IPV6))
862 return pskb_may_pull(skb, *network_depth + sizeof(struct ipv6hdr));
863
864 return false;
865}
866
867static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
868{
869 int network_depth = 0;
870 __be16 proto;
871 void *ip;
872 int rc;
873
874 if (unlikely(!is_last_ethertype_ip(skb, &network_depth, &proto)))
875 return;
876
877 ip = skb->data + network_depth;
878 rc = ((proto == htons(ETH_P_IP)) ? IP_ECN_set_ce((struct iphdr *)ip) :
879 IP6_ECN_set_ce(skb, (struct ipv6hdr *)ip));
880
881 rq->stats->ecn_mark += !!rc;
882}
883
884static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
885{
886 void *ip_p = skb->data + network_depth;
887
888 return (proto == htons(ETH_P_IP)) ? ((struct iphdr *)ip_p)->protocol :
889 ((struct ipv6hdr *)ip_p)->nexthdr;
890}
891
892#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
893
894#define MAX_PADDING 8
895
896static void
897tail_padding_csum_slow(struct sk_buff *skb, int offset, int len,
898 struct mlx5e_rq_stats *stats)
899{
900 stats->csum_complete_tail_slow++;
901 skb->csum = csum_block_add(skb->csum,
902 skb_checksum(skb, offset, len, 0),
903 offset);
904}
905
906static void
907tail_padding_csum(struct sk_buff *skb, int offset,
908 struct mlx5e_rq_stats *stats)
909{
910 u8 tail_padding[MAX_PADDING];
911 int len = skb->len - offset;
912 void *tail;
913
914 if (unlikely(len > MAX_PADDING)) {
915 tail_padding_csum_slow(skb, offset, len, stats);
916 return;
917 }
918
919 tail = skb_header_pointer(skb, offset, len, tail_padding);
920 if (unlikely(!tail)) {
921 tail_padding_csum_slow(skb, offset, len, stats);
922 return;
923 }
924
925 stats->csum_complete_tail++;
926 skb->csum = csum_block_add(skb->csum, csum_partial(tail, len, 0), offset);
927}
928
929static void
930mlx5e_skb_csum_fixup(struct sk_buff *skb, int network_depth, __be16 proto,
931 struct mlx5e_rq_stats *stats)
932{
933 struct ipv6hdr *ip6;
934 struct iphdr *ip4;
935 int pkt_len;

	/* Fix up VLAN headers, if any. */
	if (network_depth > ETH_HLEN)
		/* The CQE checksum is calculated from the IP header onwards
		 * and does not cover VLAN headers (if present), so add them
		 * in manually.
		 */
		skb->csum = csum_partial(skb->data + ETH_HLEN,
					 network_depth - ETH_HLEN,
					 skb->csum);

	/* Fix up any tail padding included in the CQE checksum. */
948 switch (proto) {
949 case htons(ETH_P_IP):
950 ip4 = (struct iphdr *)(skb->data + network_depth);
951 pkt_len = network_depth + ntohs(ip4->tot_len);
952 break;
953 case htons(ETH_P_IPV6):
954 ip6 = (struct ipv6hdr *)(skb->data + network_depth);
955 pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len);
956 break;
957 default:
958 return;
959 }
960
961 if (likely(pkt_len >= skb->len))
962 return;
963
964 tail_padding_csum(skb, pkt_len, stats);
965}
966
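/* Choose the SKB checksum status: prefer CHECKSUM_COMPLETE built from the
 * CQE checksum when the packet is a plain (possibly VLAN-tagged) IPv4/IPv6
 * frame; fall back to CHECKSUM_UNNECESSARY based on the L3/L4 OK bits for
 * short frames, SCTP and tunnels; otherwise report CHECKSUM_NONE.
 */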
967static inline void mlx5e_handle_csum(struct net_device *netdev,
968 struct mlx5_cqe64 *cqe,
969 struct mlx5e_rq *rq,
970 struct sk_buff *skb,
971 bool lro)
972{
973 struct mlx5e_rq_stats *stats = rq->stats;
974 int network_depth = 0;
975 __be16 proto;
976
977 if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
978 goto csum_none;
979
980 if (lro) {
981 skb->ip_summed = CHECKSUM_UNNECESSARY;
982 stats->csum_unnecessary++;
983 return;
984 }

	/* True when explicitly set via priv flag, or XDP prog is loaded */
	if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))
		goto csum_unnecessary;

	/* CQE csum doesn't cover padding octets in short ethernet
	 * frames. And the pad field is appended prior to calculating
	 * and appending the FCS field.
	 *
	 * Detecting these padded frames requires to verify and parse
	 * IP headers, so we simply force all those small frames to be
	 * CHECKSUM_UNNECESSARY even if they are not padded.
	 */
	if (short_frame(skb->len))
		goto csum_unnecessary;
1000
1001 if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
1002 u8 ipproto = get_ip_proto(skb, network_depth, proto);
1003
1004 if (unlikely(ipproto == IPPROTO_SCTP))
1005 goto csum_unnecessary;
1006
1007 if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
1008 goto csum_none;
1009
1010 stats->csum_complete++;
1011 skb->ip_summed = CHECKSUM_COMPLETE;
1012 skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
1013
1014 if (test_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state))
1015 return;

		/* The checksum may still need fixups for VLAN headers and
		 * tail padding that the CQE checksum does not account for.
		 */
		mlx5e_skb_csum_fixup(skb, network_depth, proto, stats);
1019 return;
1020 }
1021
1022csum_unnecessary:
1023 if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
1024 (cqe->hds_ip_ext & CQE_L4_OK))) {
1025 skb->ip_summed = CHECKSUM_UNNECESSARY;
1026 if (cqe_is_tunneled(cqe)) {
1027 skb->csum_level = 1;
1028 skb->encapsulation = 1;
1029 stats->csum_unnecessary_inner++;
1030 return;
1031 }
1032 stats->csum_unnecessary++;
1033 return;
1034 }
1035csum_none:
1036 skb->ip_summed = CHECKSUM_NONE;
1037 stats->csum_none++;
1038}
1039
1040#define MLX5E_CE_BIT_MASK 0x80
1041
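/* Fill in the SKB metadata from the CQE: TLS/IPsec offload hooks, LRO/GSO
 * parameters, hardware timestamp, RSS hash, VLAN tag, checksum status, ECN
 * CE marking and the protocol field.
 */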
1042static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
1043 u32 cqe_bcnt,
1044 struct mlx5e_rq *rq,
1045 struct sk_buff *skb)
1046{
1047 u8 lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
1048 struct mlx5e_rq_stats *stats = rq->stats;
1049 struct net_device *netdev = rq->netdev;
1050
1051 skb->mac_len = ETH_HLEN;
1052
1053 mlx5e_tls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
1054
1055 if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
1056 mlx5e_ipsec_offload_handle_rx_skb(netdev, skb, cqe);
1057
1058 if (lro_num_seg > 1) {
1059 mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
1060 skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
		/* Subtract one since we already counted this as one
		 * "regular" packet in mlx5e_complete_rx_cqe()
		 */
		stats->packets += lro_num_seg - 1;
1065 stats->lro_packets++;
1066 stats->lro_bytes += cqe_bcnt;
1067 }
1068
1069 if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
1070 skb_hwtstamps(skb)->hwtstamp =
1071 mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));
1072
1073 skb_record_rx_queue(skb, rq->ix);
1074
1075 if (likely(netdev->features & NETIF_F_RXHASH))
1076 mlx5e_skb_set_hash(cqe, skb);
1077
1078 if (cqe_has_vlan(cqe)) {
1079 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1080 be16_to_cpu(cqe->vlan_info));
1081 stats->removed_vlan_packets++;
1082 }
1083
1084 skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
1085
1086 mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
1087
1088 if (unlikely(cqe->ml_path & MLX5E_CE_BIT_MASK))
1089 mlx5e_enable_ecn(rq, skb);
1090
1091 skb->protocol = eth_type_trans(skb, netdev);
1092
1093 if (unlikely(mlx5e_skb_is_multicast(skb)))
1094 stats->mcast_packets++;
1095}
1096
1097static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
1098 struct mlx5_cqe64 *cqe,
1099 u32 cqe_bcnt,
1100 struct sk_buff *skb)
1101{
1102 struct mlx5e_rq_stats *stats = rq->stats;
1103
1104 stats->packets++;
1105 stats->bytes += cqe_bcnt;
1106 mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
1107}
1108
1109static inline
1110struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
1111 u32 frag_size, u16 headroom,
1112 u32 cqe_bcnt)
1113{
1114 struct sk_buff *skb = build_skb(va, frag_size);
1115
1116 if (unlikely(!skb)) {
1117 rq->stats->buff_alloc_err++;
1118 return NULL;
1119 }
1120
1121 skb_reserve(skb, headroom);
1122 skb_put(skb, cqe_bcnt);
1123
1124 return skb;
1125}
1126
1127static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom,
1128 u32 len, struct xdp_buff *xdp)
1129{
1130 xdp->data_hard_start = va;
1131 xdp->data = va + headroom;
1132 xdp_set_data_meta_invalid(xdp);
1133 xdp->data_end = xdp->data + len;
1134 xdp->rxq = &rq->xdp_rxq;
1135 xdp->frame_sz = rq->buff.frame0_sz;
1136}
1137
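/* Linear RX path: the whole packet fits in one fragment, so run XDP on the
 * buffer in place and, if the packet is passed up, build the SKB directly on
 * top of the page (taking an extra page reference for recycling).
 */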
1138static struct sk_buff *
1139mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
1140 struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
1141{
1142 struct mlx5e_dma_info *di = wi->di;
1143 u16 rx_headroom = rq->buff.headroom;
1144 struct xdp_buff xdp;
1145 struct sk_buff *skb;
1146 void *va, *data;
1147 u32 frag_size;
1148
1149 va = page_address(di->page) + wi->offset;
1150 data = va + rx_headroom;
1151 frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
1152
1153 dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
1154 frag_size, DMA_FROM_DEVICE);
1155 net_prefetchw(va);
1156 net_prefetch(data);
1157
1158 mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
1159 if (mlx5e_xdp_handle(rq, di, &cqe_bcnt, &xdp))
1160 return NULL;
1161
1162 rx_headroom = xdp.data - xdp.data_hard_start;
1163 frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
1164 skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt);
1165 if (unlikely(!skb))
1166 return NULL;

	/* queue up for recycling/reuse */
	page_ref_inc(di->page);
1170
1171 return skb;
1172}
1173
1174static struct sk_buff *
1175mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
1176 struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
1177{
1178 struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
1179 struct mlx5e_wqe_frag_info *head_wi = wi;
1180 u16 headlen = min_t(u32, MLX5E_RX_MAX_HEAD, cqe_bcnt);
1181 u16 frag_headlen = headlen;
1182 u16 byte_cnt = cqe_bcnt - headlen;
1183 struct sk_buff *skb;
1184
	/* XDP is not supported in this configuration, as incoming packets
	 * might spread among multiple pages.
	 */
1188 skb = napi_alloc_skb(rq->cq.napi,
1189 ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
1190 if (unlikely(!skb)) {
1191 rq->stats->buff_alloc_err++;
1192 return NULL;
1193 }
1194
1195 net_prefetchw(skb->data);
1196
1197 while (byte_cnt) {
1198 u16 frag_consumed_bytes =
1199 min_t(u16, frag_info->frag_size - frag_headlen, byte_cnt);
1200
1201 mlx5e_add_skb_frag(rq, skb, wi->di, wi->offset + frag_headlen,
1202 frag_consumed_bytes, frag_info->frag_stride);
1203 byte_cnt -= frag_consumed_bytes;
1204 frag_headlen = 0;
1205 frag_info++;
1206 wi++;
1207 }
1208
	/* copy header */
	mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset, headlen);
1211
1212 skb->tail += headlen;
1213 skb->len += headlen;
1214
1215 return skb;
1216}
1217
1218static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1219{
1220 struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe;
1221
1222 if (cqe_syndrome_needs_recover(err_cqe->syndrome) &&
1223 !test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state)) {
1224 mlx5e_dump_error_cqe(&rq->cq, rq->rqn, err_cqe);
1225 queue_work(rq->channel->priv->wq, &rq->recover_work);
1226 }
1227}
1228
1229static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1230{
1231 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
1232 struct mlx5e_wqe_frag_info *wi;
1233 struct sk_buff *skb;
1234 u32 cqe_bcnt;
1235 u16 ci;
1236
1237 ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
1238 wi = get_frag(rq, ci);
1239 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
1240
1241 if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1242 trigger_report(rq, cqe);
1243 rq->stats->wqe_err++;
1244 goto free_wqe;
1245 }
1246
1247 skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
1248 mlx5e_skb_from_cqe_linear,
1249 mlx5e_skb_from_cqe_nonlinear,
1250 rq, cqe, wi, cqe_bcnt);
1251 if (!skb) {
		/* probably for XDP */
		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
			/* do not return page to cache,
			 * it will be returned on XDP_TX completion.
			 */
			goto wq_cyc_pop;
1258 }
1259 goto free_wqe;
1260 }
1261
1262 mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1263
1264 if (mlx5e_cqe_regb_chain(cqe))
1265 if (!mlx5e_tc_update_skb(cqe, skb))
1266 goto free_wqe;
1267
1268 napi_gro_receive(rq->cq.napi, skb);
1269
1270free_wqe:
1271 mlx5e_free_rx_wqe(rq, wi, true);
1272wq_cyc_pop:
1273 mlx5_wq_cyc_pop(wq);
1274}
1275
1276#ifdef CONFIG_MLX5_ESWITCH
1277static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1278{
1279 struct net_device *netdev = rq->netdev;
1280 struct mlx5e_priv *priv = netdev_priv(netdev);
1281 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1282 struct mlx5_eswitch_rep *rep = rpriv->rep;
1283 struct mlx5e_tc_update_priv tc_priv = {};
1284 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
1285 struct mlx5e_wqe_frag_info *wi;
1286 struct sk_buff *skb;
1287 u32 cqe_bcnt;
1288 u16 ci;
1289
1290 ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
1291 wi = get_frag(rq, ci);
1292 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
1293
1294 if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1295 rq->stats->wqe_err++;
1296 goto free_wqe;
1297 }
1298
1299 skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
1300 mlx5e_skb_from_cqe_linear,
1301 mlx5e_skb_from_cqe_nonlinear,
1302 rq, cqe, wi, cqe_bcnt);
1303 if (!skb) {
		/* probably for XDP */
		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
			/* do not return page to cache,
			 * it will be returned on XDP_TX completion.
			 */
			goto wq_cyc_pop;
1310 }
1311 goto free_wqe;
1312 }
1313
1314 mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1315
1316 if (rep->vlan && skb_vlan_tag_present(skb))
1317 skb_vlan_pop(skb);
1318
1319 if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))
1320 goto free_wqe;
1321
1322 napi_gro_receive(rq->cq.napi, skb);
1323
1324 mlx5_rep_tc_post_napi_receive(&tc_priv);
1325
1326free_wqe:
1327 mlx5e_free_rx_wqe(rq, wi, true);
1328wq_cyc_pop:
1329 mlx5_wq_cyc_pop(wq);
1330}
1331
1332static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1333{
1334 u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
1335 u16 wqe_id = be16_to_cpu(cqe->wqe_id);
1336 struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
1337 u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
1338 u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
1339 u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
1340 u32 page_idx = wqe_offset >> PAGE_SHIFT;
1341 struct mlx5e_tc_update_priv tc_priv = {};
1342 struct mlx5e_rx_wqe_ll *wqe;
1343 struct mlx5_wq_ll *wq;
1344 struct sk_buff *skb;
1345 u16 cqe_bcnt;
1346
1347 wi->consumed_strides += cstrides;
1348
1349 if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1350 trigger_report(rq, cqe);
1351 rq->stats->wqe_err++;
1352 goto mpwrq_cqe_out;
1353 }
1354
1355 if (unlikely(mpwrq_is_filler_cqe(cqe))) {
1356 struct mlx5e_rq_stats *stats = rq->stats;
1357
1358 stats->mpwqe_filler_cqes++;
1359 stats->mpwqe_filler_strides += cstrides;
1360 goto mpwrq_cqe_out;
1361 }
1362
1363 cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
1364
1365 skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
1366 mlx5e_skb_from_cqe_mpwrq_linear,
1367 mlx5e_skb_from_cqe_mpwrq_nonlinear,
1368 rq, wi, cqe_bcnt, head_offset, page_idx);
1369 if (!skb)
1370 goto mpwrq_cqe_out;
1371
1372 mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1373
1374 if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))
1375 goto mpwrq_cqe_out;
1376
1377 napi_gro_receive(rq->cq.napi, skb);
1378
1379 mlx5_rep_tc_post_napi_receive(&tc_priv);
1380
1381mpwrq_cqe_out:
1382 if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
1383 return;
1384
1385 wq = &rq->mpwqe.wq;
1386 wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
1387 mlx5e_free_rx_mpwqe(rq, wi, true);
1388 mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
1389}
1390
1391const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = {
1392 .handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
1393 .handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep,
1394};
1395#endif
1396
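/* Non-linear striding-RQ path: copy up to MLX5E_RX_MAX_HEAD bytes of headers
 * into the SKB's linear part and attach the remaining strides as page
 * fragments, crossing page boundaries as needed.
 */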
1397static struct sk_buff *
1398mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
1399 u16 cqe_bcnt, u32 head_offset, u32 page_idx)
1400{
1401 u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
1402 struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
1403 u32 frag_offset = head_offset + headlen;
1404 u32 byte_cnt = cqe_bcnt - headlen;
1405 struct mlx5e_dma_info *head_di = di;
1406 struct sk_buff *skb;
1407
1408 skb = napi_alloc_skb(rq->cq.napi,
1409 ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
1410 if (unlikely(!skb)) {
1411 rq->stats->buff_alloc_err++;
1412 return NULL;
1413 }
1414
1415 net_prefetchw(skb->data);
1416
1417 if (unlikely(frag_offset >= PAGE_SIZE)) {
1418 di++;
1419 frag_offset -= PAGE_SIZE;
1420 }
1421
1422 while (byte_cnt) {
1423 u32 pg_consumed_bytes =
1424 min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
1425 unsigned int truesize =
1426 ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
1427
1428 mlx5e_add_skb_frag(rq, skb, di, frag_offset,
1429 pg_consumed_bytes, truesize);
1430 byte_cnt -= pg_consumed_bytes;
1431 frag_offset = 0;
1432 di++;
1433 }
1434
1435 mlx5e_copy_skb_header(rq->pdev, skb, head_di, head_offset, headlen);
1436
1437 skb->tail += headlen;
1438 skb->len += headlen;
1439
1440 return skb;
1441}
1442
1443static struct sk_buff *
1444mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
1445 u16 cqe_bcnt, u32 head_offset, u32 page_idx)
1446{
1447 struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
1448 u16 rx_headroom = rq->buff.headroom;
1449 u32 cqe_bcnt32 = cqe_bcnt;
1450 struct xdp_buff xdp;
1451 struct sk_buff *skb;
1452 void *va, *data;
1453 u32 frag_size;
1454
	/* Check packet size. Note LRO doesn't use linear SKB */
	if (unlikely(cqe_bcnt > rq->hw_mtu)) {
1457 rq->stats->oversize_pkts_sw_drop++;
1458 return NULL;
1459 }
1460
1461 va = page_address(di->page) + head_offset;
1462 data = va + rx_headroom;
1463 frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
1464
1465 dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset,
1466 frag_size, DMA_FROM_DEVICE);
1467 net_prefetchw(va);
1468 net_prefetch(data);
1469
1470 mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt32, &xdp);
1471 if (mlx5e_xdp_handle(rq, di, &cqe_bcnt32, &xdp)) {
1472 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
1473 __set_bit(page_idx, wi->xdp_xmit_bitmap);
1474 return NULL;
1475 }
1476
1477 rx_headroom = xdp.data - xdp.data_hard_start;
1478 frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
1479 skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32);
1480 if (unlikely(!skb))
1481 return NULL;
1482
	/* queue up for recycling/reuse */
	page_ref_inc(di->page);
1485
1486 return skb;
1487}
1488
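/* Completion handler for the striding RQ: account the strides consumed by
 * this CQE, build and deliver the SKB (unless it is a filler or error CQE),
 * and pop the multi-packet WQE once all of its strides have been consumed.
 */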
1489static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1490{
1491 u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
1492 u16 wqe_id = be16_to_cpu(cqe->wqe_id);
1493 struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
1494 u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
1495 u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
1496 u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
1497 u32 page_idx = wqe_offset >> PAGE_SHIFT;
1498 struct mlx5e_rx_wqe_ll *wqe;
1499 struct mlx5_wq_ll *wq;
1500 struct sk_buff *skb;
1501 u16 cqe_bcnt;
1502
1503 wi->consumed_strides += cstrides;
1504
1505 if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1506 trigger_report(rq, cqe);
1507 rq->stats->wqe_err++;
1508 goto mpwrq_cqe_out;
1509 }
1510
1511 if (unlikely(mpwrq_is_filler_cqe(cqe))) {
1512 struct mlx5e_rq_stats *stats = rq->stats;
1513
1514 stats->mpwqe_filler_cqes++;
1515 stats->mpwqe_filler_strides += cstrides;
1516 goto mpwrq_cqe_out;
1517 }
1518
1519 cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
1520
1521 skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
1522 mlx5e_skb_from_cqe_mpwrq_linear,
1523 mlx5e_skb_from_cqe_mpwrq_nonlinear,
1524 rq, wi, cqe_bcnt, head_offset, page_idx);
1525 if (!skb)
1526 goto mpwrq_cqe_out;
1527
1528 mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1529
1530 if (mlx5e_cqe_regb_chain(cqe))
1531 if (!mlx5e_tc_update_skb(cqe, skb))
1532 goto mpwrq_cqe_out;
1533
1534 napi_gro_receive(rq->cq.napi, skb);
1535
1536mpwrq_cqe_out:
1537 if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
1538 return;
1539
1540 wq = &rq->mpwqe.wq;
1541 wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
1542 mlx5e_free_rx_mpwqe(rq, wi, true);
1543 mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
1544}
1545
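/* Main RX NAPI poll: resume any pending CQE decompression first, then handle
 * up to "budget" CQEs, expanding compressed sessions on the fly; finally
 * flush pending XDP work and update the CQ consumer doorbell record.
 */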
1546int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
1547{
1548 struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
1549 struct mlx5_cqwq *cqwq = &cq->wq;
1550 struct mlx5_cqe64 *cqe;
1551 int work_done = 0;
1552
1553 if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
1554 return 0;
1555
1556 if (rq->page_pool)
1557 page_pool_nid_changed(rq->page_pool, numa_mem_id());
1558
1559 if (rq->cqd.left) {
1560 work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget);
1561 if (rq->cqd.left || work_done >= budget)
1562 goto out;
1563 }
1564
1565 cqe = mlx5_cqwq_get_cqe(cqwq);
1566 if (!cqe) {
1567 if (unlikely(work_done))
1568 goto out;
1569 return 0;
1570 }
1571
1572 do {
1573 if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
1574 work_done +=
1575 mlx5e_decompress_cqes_start(rq, cqwq,
1576 budget - work_done);
1577 continue;
1578 }
1579
1580 mlx5_cqwq_pop(cqwq);
1581
1582 INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
1583 mlx5e_handle_rx_cqe, rq, cqe);
1584 } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));
1585
1586out:
1587 if (rcu_access_pointer(rq->xdp_prog))
1588 mlx5e_xdp_rx_poll_complete(rq);
1589
1590 mlx5_cqwq_update_db_record(cqwq);
1591
	/* ensure cq space is freed before enabling more cqes */
	wmb();
1594
1595 return work_done;
1596}
1597
1598#ifdef CONFIG_MLX5_CORE_IPOIB
1599
1600#define MLX5_IB_GRH_SGID_OFFSET 8
1601#define MLX5_IB_GRH_DGID_OFFSET 24
1602#define MLX5_GID_SIZE 16
1603
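/* IPoIB RX completion: look up the (child) netdev for the QP number in the
 * CQE, classify the packet from the GRH, drop multicast frames we sent
 * ourselves, then strip the GRH and prepend the zeroed IPoIB pseudo header
 * expected by the stack.
 */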
1604static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
1605 struct mlx5_cqe64 *cqe,
1606 u32 cqe_bcnt,
1607 struct sk_buff *skb)
1608{
1609 struct hwtstamp_config *tstamp;
1610 struct mlx5e_rq_stats *stats;
1611 struct net_device *netdev;
1612 struct mlx5e_priv *priv;
1613 char *pseudo_header;
1614 u32 flags_rqpn;
1615 u32 qpn;
1616 u8 *dgid;
1617 u8 g;
1618
1619 qpn = be32_to_cpu(cqe->sop_drop_qpn) & 0xffffff;
1620 netdev = mlx5i_pkey_get_netdev(rq->netdev, qpn);
1621
	/* No mapping present, cannot process the SKB. This might happen if a
	 * child interface is going down while having unprocessed CQEs on the
	 * parent RQ.
	 */
	if (unlikely(!netdev)) {
		/* Drop the packet: the caller frees the SKB when skb->dev is
		 * left NULL.
		 */
		skb->dev = NULL;
1628 pr_warn_once("Unable to map QPN %u to dev - dropping skb\n", qpn);
1629 return;
1630 }
1631
1632 priv = mlx5i_epriv(netdev);
1633 tstamp = &priv->tstamp;
1634 stats = &priv->channel_stats[rq->ix].rq;
1635
1636 flags_rqpn = be32_to_cpu(cqe->flags_rqpn);
1637 g = (flags_rqpn >> 28) & 3;
1638 dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
1639 if ((!g) || dgid[0] != 0xff)
1640 skb->pkt_type = PACKET_HOST;
1641 else if (memcmp(dgid, netdev->broadcast + 4, MLX5_GID_SIZE) == 0)
1642 skb->pkt_type = PACKET_BROADCAST;
1643 else
1644 skb->pkt_type = PACKET_MULTICAST;
1645
	/* Drop packets that this interface sent, i.e. multicast packets
	 * that the HCA has replicated.
	 */
	if (g && (qpn == (flags_rqpn & 0xffffff)) &&
1650 (memcmp(netdev->dev_addr + 4, skb->data + MLX5_IB_GRH_SGID_OFFSET,
1651 MLX5_GID_SIZE) == 0)) {
1652 skb->dev = NULL;
1653 return;
1654 }
1655
1656 skb_pull(skb, MLX5_IB_GRH_BYTES);
1657
1658 skb->protocol = *((__be16 *)(skb->data));
1659
1660 if (netdev->features & NETIF_F_RXCSUM) {
1661 skb->ip_summed = CHECKSUM_COMPLETE;
1662 skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
1663 stats->csum_complete++;
1664 } else {
1665 skb->ip_summed = CHECKSUM_NONE;
1666 stats->csum_none++;
1667 }
1668
1669 if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
1670 skb_hwtstamps(skb)->hwtstamp =
1671 mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));
1672
1673 skb_record_rx_queue(skb, rq->ix);
1674
1675 if (likely(netdev->features & NETIF_F_RXHASH))
1676 mlx5e_skb_set_hash(cqe, skb);
1677
	/* Prepend the zeroed IPoIB pseudo header expected by the stack. */
	pseudo_header = skb_push(skb, MLX5_IPOIB_PSEUDO_LEN);
1680 memset(pseudo_header, 0, MLX5_IPOIB_PSEUDO_LEN);
1681 skb_reset_mac_header(skb);
1682 skb_pull(skb, MLX5_IPOIB_HARD_LEN);
1683
1684 skb->dev = netdev;
1685
1686 stats->packets++;
1687 stats->bytes += cqe_bcnt;
1688}
1689
1690static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1691{
1692 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
1693 struct mlx5e_wqe_frag_info *wi;
1694 struct sk_buff *skb;
1695 u32 cqe_bcnt;
1696 u16 ci;
1697
1698 ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
1699 wi = get_frag(rq, ci);
1700 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
1701
1702 if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1703 rq->stats->wqe_err++;
1704 goto wq_free_wqe;
1705 }
1706
1707 skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
1708 mlx5e_skb_from_cqe_linear,
1709 mlx5e_skb_from_cqe_nonlinear,
1710 rq, cqe, wi, cqe_bcnt);
1711 if (!skb)
1712 goto wq_free_wqe;
1713
1714 mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1715 if (unlikely(!skb->dev)) {
1716 dev_kfree_skb_any(skb);
1717 goto wq_free_wqe;
1718 }
1719 napi_gro_receive(rq->cq.napi, skb);
1720
1721wq_free_wqe:
1722 mlx5e_free_rx_wqe(rq, wi, true);
1723 mlx5_wq_cyc_pop(wq);
1724}
1725
1726const struct mlx5e_rx_handlers mlx5i_rx_handlers = {
1727 .handle_rx_cqe = mlx5i_handle_rx_cqe,
1728 .handle_rx_cqe_mpwqe = NULL,
1729};
1730#endif
1731
1732#ifdef CONFIG_MLX5_EN_IPSEC
1733
1734static void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1735{
1736 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
1737 struct mlx5e_wqe_frag_info *wi;
1738 struct sk_buff *skb;
1739 u32 cqe_bcnt;
1740 u16 ci;
1741
1742 ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
1743 wi = get_frag(rq, ci);
1744 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
1745
1746 if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1747 rq->stats->wqe_err++;
1748 goto wq_free_wqe;
1749 }
1750
1751 skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
1752 mlx5e_skb_from_cqe_linear,
1753 mlx5e_skb_from_cqe_nonlinear,
1754 rq, cqe, wi, cqe_bcnt);
1755 if (unlikely(!skb))
1756 goto wq_free_wqe;
1757
1758 skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt);
1759 if (unlikely(!skb))
1760 goto wq_free_wqe;
1761
1762 mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1763 napi_gro_receive(rq->cq.napi, skb);
1764
1765wq_free_wqe:
1766 mlx5e_free_rx_wqe(rq, wi, true);
1767 mlx5_wq_cyc_pop(wq);
1768}
1769
1770#endif
1771
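/* Select the RX datapath callbacks for this RQ according to the WQ type
 * (striding vs. legacy), XSK mode, whether received packets fit in a linear
 * SKB, and IPsec offload support; the CQE handler comes from the netdev
 * profile.
 */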
1772int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk)
1773{
1774 struct mlx5_core_dev *mdev = rq->mdev;
1775 struct mlx5e_channel *c = rq->channel;
1776
1777 switch (rq->wq_type) {
1778 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
1779 rq->mpwqe.skb_from_cqe_mpwrq = xsk ?
1780 mlx5e_xsk_skb_from_cqe_mpwrq_linear :
1781 mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ?
1782 mlx5e_skb_from_cqe_mpwrq_linear :
1783 mlx5e_skb_from_cqe_mpwrq_nonlinear;
1784 rq->post_wqes = mlx5e_post_rx_mpwqes;
1785 rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
1786
1787 rq->handle_rx_cqe = c->priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
1788#ifdef CONFIG_MLX5_EN_IPSEC
1789 if (MLX5_IPSEC_DEV(mdev)) {
1790 netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n");
1791 return -EINVAL;
1792 }
1793#endif
1794 if (!rq->handle_rx_cqe) {
1795 netdev_err(c->netdev, "RX handler of MPWQE RQ is not set\n");
1796 return -EINVAL;
1797 }
1798 break;
1799 default:
1800 rq->wqe.skb_from_cqe = xsk ?
1801 mlx5e_xsk_skb_from_cqe_linear :
1802 mlx5e_rx_is_linear_skb(params, NULL) ?
1803 mlx5e_skb_from_cqe_linear :
1804 mlx5e_skb_from_cqe_nonlinear;
1805 rq->post_wqes = mlx5e_post_rx_wqes;
1806 rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
1807
1808#ifdef CONFIG_MLX5_EN_IPSEC
1809 if ((mlx5_fpga_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) &&
1810 c->priv->ipsec)
1811 rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
1812 else
1813#endif
1814 rq->handle_rx_cqe = c->priv->profile->rx_handlers->handle_rx_cqe;
1815 if (!rq->handle_rx_cqe) {
1816 netdev_err(c->netdev, "RX handler of RQ is not set\n");
1817 return -EINVAL;
1818 }
1819 }
1820
1821 return 0;
1822}
1823