/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 */

40#include <net/tc_act/tc_gact.h>
41#include <net/pkt_cls.h>
42#include <linux/mlx5/fs.h>
43#include <net/vxlan.h>
44#include <linux/bpf.h>
45#include <net/page_pool.h>
46#include "eswitch.h"
47#include "en.h"
48#include "en_tc.h"
49#include "en_rep.h"
50#include "en_accel/ipsec.h"
51#include "en_accel/ipsec_rxtx.h"
52#include "en_accel/tls.h"
53#include "accel/ipsec.h"
54#include "accel/tls.h"
55#include "lib/vxlan.h"
56#include "lib/clock.h"
57#include "en/port.h"
58#include "en/xdp.h"
59#include "lib/eq.h"
60
61struct mlx5e_rq_param {
62 u32 rqc[MLX5_ST_SZ_DW(rqc)];
63 struct mlx5_wq_param wq;
64 struct mlx5e_rq_frags_info frags_info;
65};
66
67struct mlx5e_sq_param {
68 u32 sqc[MLX5_ST_SZ_DW(sqc)];
69 struct mlx5_wq_param wq;
70};
71
72struct mlx5e_cq_param {
73 u32 cqc[MLX5_ST_SZ_DW(cqc)];
74 struct mlx5_wq_param wq;
75 u16 eq_ix;
76 u8 cq_period_mode;
77};
78
79struct mlx5e_channel_param {
80 struct mlx5e_rq_param rq;
81 struct mlx5e_sq_param sq;
82 struct mlx5e_sq_param xdp_sq;
83 struct mlx5e_sq_param icosq;
84 struct mlx5e_cq_param rx_cq;
85 struct mlx5e_cq_param tx_cq;
86 struct mlx5e_cq_param icosq_cq;
87};
88
89bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
90{
91 bool striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) &&
92 MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
93 MLX5_CAP_ETH(mdev, reg_umr_sq);
94 u16 max_wqe_sz_cap = MLX5_CAP_GEN(mdev, max_wqe_sz_sq);
95 bool inline_umr = MLX5E_UMR_WQE_INLINE_SZ <= max_wqe_sz_cap;
96
97 if (!striding_rq_umr)
98 return false;
99 if (!inline_umr) {
100 mlx5_core_warn(mdev, "Cannot support Striding RQ: UMR WQE size (%d) exceeds maximum supported (%d).\n",
101 (int)MLX5E_UMR_WQE_INLINE_SZ, max_wqe_sz_cap);
102 return false;
103 }
104 return true;
105}
106
107static u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params)
108{
109 u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
110 u16 linear_rq_headroom = params->xdp_prog ?
111 XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
112 u32 frag_sz;
113
114 linear_rq_headroom += NET_IP_ALIGN;
115
116 frag_sz = MLX5_SKB_FRAG_SZ(linear_rq_headroom + hw_mtu);
117
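	/* XDP in mlx5e does not support multiple packets per page */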
118 if (params->xdp_prog && frag_sz < PAGE_SIZE)
119 frag_sz = PAGE_SIZE;
120
121 return frag_sz;
122}
123
124static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params)
125{
126 u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params);
127
128 return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
129}
130
131static bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
132 struct mlx5e_params *params)
133{
134 u32 frag_sz = mlx5e_rx_get_linear_frag_sz(params);
135
136 return !params->lro_en && frag_sz <= PAGE_SIZE;
137}
138
139#define MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ ((BIT(__mlx5_bit_sz(wq, log_wqe_stride_size)) - 1) + \
140 MLX5_MPWQE_LOG_STRIDE_SZ_BASE)
141static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
142 struct mlx5e_params *params)
143{
144 u32 frag_sz = mlx5e_rx_get_linear_frag_sz(params);
145 s8 signed_log_num_strides_param;
146 u8 log_num_strides;
147
148 if (!mlx5e_rx_is_linear_skb(mdev, params))
149 return false;
150
151 if (order_base_2(frag_sz) > MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ)
152 return false;
153
154 if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
155 return true;
156
157 log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(frag_sz);
158 signed_log_num_strides_param =
159 (s8)log_num_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
160
161 return signed_log_num_strides_param >= 0;
162}
163
164static u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params)
165{
166 if (params->log_rq_mtu_frames <
167 mlx5e_mpwqe_log_pkts_per_wqe(params) + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
168 return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
169
170 return params->log_rq_mtu_frames - mlx5e_mpwqe_log_pkts_per_wqe(params);
171}
172
173static u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
174 struct mlx5e_params *params)
175{
176 if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
177 return order_base_2(mlx5e_rx_get_linear_frag_sz(params));
178
179 return MLX5E_MPWQE_STRIDE_SZ(mdev,
180 MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
181}
182
183static u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
184 struct mlx5e_params *params)
185{
186 return MLX5_MPWRQ_LOG_WQE_SZ -
187 mlx5e_mpwqe_get_log_stride_size(mdev, params);
188}
189
190static u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
191 struct mlx5e_params *params)
192{
193 u16 linear_rq_headroom = params->xdp_prog ?
194 XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
195 bool is_linear_skb;
196
197 linear_rq_headroom += NET_IP_ALIGN;
198
199 is_linear_skb = (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC) ?
200 mlx5e_rx_is_linear_skb(mdev, params) :
201 mlx5e_rx_mpwqe_is_linear_skb(mdev, params);
202
203 return is_linear_skb ? linear_rq_headroom : 0;
204}
205
206void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
207 struct mlx5e_params *params)
208{
209 params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
210 params->log_rq_mtu_frames = is_kdump_kernel() ?
211 MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
212 MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
213
214 mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
215 params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
216 params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
217 BIT(mlx5e_mpwqe_get_log_rq_size(params)) :
218 BIT(params->log_rq_mtu_frames),
219 BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params)),
220 MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
221}
222
223bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
224 struct mlx5e_params *params)
225{
226 return mlx5e_check_fragmented_striding_rq_cap(mdev) &&
227 !MLX5_IPSEC_DEV(mdev) &&
228 !(params->xdp_prog && !mlx5e_rx_mpwqe_is_linear_skb(mdev, params));
229}
230
231void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
232{
233 params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
234 MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
235 MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
236 MLX5_WQ_TYPE_CYCLIC;
237}
238
239static void mlx5e_update_carrier(struct mlx5e_priv *priv)
240{
241 struct mlx5_core_dev *mdev = priv->mdev;
242 u8 port_state;
243
244 port_state = mlx5_query_vport_state(mdev,
245 MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT,
246 0);
247
248 if (port_state == VPORT_STATE_UP) {
249 netdev_info(priv->netdev, "Link up\n");
250 netif_carrier_on(priv->netdev);
251 } else {
252 netdev_info(priv->netdev, "Link down\n");
253 netif_carrier_off(priv->netdev);
254 }
255}
256
257static void mlx5e_update_carrier_work(struct work_struct *work)
258{
259 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
260 update_carrier_work);
261
262 mutex_lock(&priv->state_lock);
263 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
264 if (priv->profile->update_carrier)
265 priv->profile->update_carrier(priv);
266 mutex_unlock(&priv->state_lock);
267}
268
269void mlx5e_update_stats(struct mlx5e_priv *priv)
270{
271 int i;
272
273 for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
274 if (mlx5e_stats_grps[i].update_stats)
275 mlx5e_stats_grps[i].update_stats(priv);
276}
277
278static void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
279{
280 int i;
281
282 for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
283 if (mlx5e_stats_grps[i].update_stats_mask &
284 MLX5E_NDO_UPDATE_STATS)
285 mlx5e_stats_grps[i].update_stats(priv);
286}
287
288static void mlx5e_update_stats_work(struct work_struct *work)
289{
290 struct delayed_work *dwork = to_delayed_work(work);
291 struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
292 update_stats_work);
293 mutex_lock(&priv->state_lock);
294 if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
295 priv->profile->update_stats(priv);
296 queue_delayed_work(priv->wq, dwork,
297 msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
298 }
299 mutex_unlock(&priv->state_lock);
300}
301
302static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
303 enum mlx5_dev_event event, unsigned long param)
304{
305 struct mlx5e_priv *priv = vpriv;
306
307 if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
308 return;
309
310 switch (event) {
311 case MLX5_DEV_EVENT_PORT_UP:
312 case MLX5_DEV_EVENT_PORT_DOWN:
313 queue_work(priv->wq, &priv->update_carrier_work);
314 break;
315 default:
316 break;
317 }
318}
319
320static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
321{
322 set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
323}
324
325static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
326{
327 clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
328 mlx5_eq_synchronize_async_irq(priv->mdev);
329}
330
331static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
332 struct mlx5e_icosq *sq,
333 struct mlx5e_umr_wqe *wqe)
334{
335 struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
336 struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
337 u8 ds_cnt = DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_DS);
338
339 cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
340 ds_cnt);
341 cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
342 cseg->imm = rq->mkey_be;
343
344 ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
345 ucseg->xlt_octowords =
346 cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
347 ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
348}
349
350static u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
351{
352 switch (rq->wq_type) {
353 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
354 return mlx5_wq_ll_get_size(&rq->mpwqe.wq);
355 default:
356 return mlx5_wq_cyc_get_size(&rq->wqe.wq);
357 }
358}
359
360static u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq)
361{
362 switch (rq->wq_type) {
363 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
364 return rq->mpwqe.wq.cur_sz;
365 default:
366 return rq->wqe.wq.cur_sz;
367 }
368}
369
370static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
371 struct mlx5e_channel *c)
372{
373 int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
374
375 rq->mpwqe.info = kvzalloc_node(array_size(wq_sz,
376 sizeof(*rq->mpwqe.info)),
377 GFP_KERNEL, cpu_to_node(c->cpu));
378 if (!rq->mpwqe.info)
379 return -ENOMEM;
380
381 mlx5e_build_umr_wqe(rq, &c->icosq, &rq->mpwqe.umr_wqe);
382
383 return 0;
384}
385
386static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
387 u64 npages, u8 page_shift,
388 struct mlx5_core_mkey *umr_mkey)
389{
390 int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
391 void *mkc;
392 u32 *in;
393 int err;
394
395 in = kvzalloc(inlen, GFP_KERNEL);
396 if (!in)
397 return -ENOMEM;
398
399 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
400
401 MLX5_SET(mkc, mkc, free, 1);
402 MLX5_SET(mkc, mkc, umr_en, 1);
403 MLX5_SET(mkc, mkc, lw, 1);
404 MLX5_SET(mkc, mkc, lr, 1);
405 MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
406
407 MLX5_SET(mkc, mkc, qpn, 0xffffff);
408 MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
409 MLX5_SET64(mkc, mkc, len, npages << page_shift);
410 MLX5_SET(mkc, mkc, translations_octword_size,
411 MLX5_MTT_OCTW(npages));
412 MLX5_SET(mkc, mkc, log_page_size, page_shift);
413
414 err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
415
416 kvfree(in);
417 return err;
418}
419
420static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
421{
422 u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->mpwqe.wq));
423
424 return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
425}
426
427static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix)
428{
429 return (wqe_ix << MLX5E_LOG_ALIGNED_MPWQE_PPW) << PAGE_SHIFT;
430}
431
432static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
433{
434 struct mlx5e_wqe_frag_info next_frag, *prev;
435 int i;
436
437 next_frag.di = &rq->wqe.di[0];
438 next_frag.offset = 0;
439 prev = NULL;
440
441 for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
442 struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
443 struct mlx5e_wqe_frag_info *frag =
444 &rq->wqe.frags[i << rq->wqe.info.log_num_frags];
445 int f;
446
447 for (f = 0; f < rq->wqe.info.num_frags; f++, frag++) {
448 if (next_frag.offset + frag_info[f].frag_stride > PAGE_SIZE) {
449 next_frag.di++;
450 next_frag.offset = 0;
451 if (prev)
452 prev->last_in_page = true;
453 }
454 *frag = next_frag;
455
456
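			/* prepare next_frag for the following iteration */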
457 next_frag.offset += frag_info[f].frag_stride;
458 prev = frag;
459 }
460 }
461
462 if (prev)
463 prev->last_in_page = true;
464}
465
466static int mlx5e_init_di_list(struct mlx5e_rq *rq,
467 struct mlx5e_params *params,
468 int wq_sz, int cpu)
469{
470 int len = wq_sz << rq->wqe.info.log_num_frags;
471
472 rq->wqe.di = kvzalloc_node(len * sizeof(*rq->wqe.di),
473 GFP_KERNEL, cpu_to_node(cpu));
474 if (!rq->wqe.di)
475 return -ENOMEM;
476
477 mlx5e_init_frags_partition(rq);
478
479 return 0;
480}
481
482static void mlx5e_free_di_list(struct mlx5e_rq *rq)
483{
484 kvfree(rq->wqe.di);
485}
486
487static int mlx5e_alloc_rq(struct mlx5e_channel *c,
488 struct mlx5e_params *params,
489 struct mlx5e_rq_param *rqp,
490 struct mlx5e_rq *rq)
491{
492 struct page_pool_params pp_params = { 0 };
493 struct mlx5_core_dev *mdev = c->mdev;
494 void *rqc = rqp->rqc;
495 void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
496 u32 pool_size;
497 int wq_sz;
498 int err;
499 int i;
500
501 rqp->wq.db_numa_node = cpu_to_node(c->cpu);
502
503 rq->wq_type = params->rq_wq_type;
504 rq->pdev = c->pdev;
505 rq->netdev = c->netdev;
506 rq->tstamp = c->tstamp;
507 rq->clock = &mdev->clock;
508 rq->channel = c;
509 rq->ix = c->ix;
510 rq->mdev = mdev;
511 rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
512 rq->stats = &c->priv->channel_stats[c->ix].rq;
513
514 rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
515 if (IS_ERR(rq->xdp_prog)) {
516 err = PTR_ERR(rq->xdp_prog);
517 rq->xdp_prog = NULL;
518 goto err_rq_wq_destroy;
519 }
520
521 err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix);
522 if (err < 0)
523 goto err_rq_wq_destroy;
524
525 rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
526 rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params);
527 pool_size = 1 << params->log_rq_mtu_frames;
528
529 switch (rq->wq_type) {
530 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
531 err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
532 &rq->wq_ctrl);
533 if (err)
534 return err;
535
536 rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR];
537
538 wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
539
540 pool_size = MLX5_MPWRQ_PAGES_PER_WQE << mlx5e_mpwqe_get_log_rq_size(params);
541
542 rq->post_wqes = mlx5e_post_rx_mpwqes;
543 rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
544
545 rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe_mpwqe;
546#ifdef CONFIG_MLX5_EN_IPSEC
547 if (MLX5_IPSEC_DEV(mdev)) {
548 err = -EINVAL;
549 netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n");
550 goto err_rq_wq_destroy;
551 }
552#endif
553 if (!rq->handle_rx_cqe) {
554 err = -EINVAL;
555 netdev_err(c->netdev, "RX handler of MPWQE RQ is not set, err %d\n", err);
556 goto err_rq_wq_destroy;
557 }
558
559 rq->mpwqe.skb_from_cqe_mpwrq =
560 mlx5e_rx_mpwqe_is_linear_skb(mdev, params) ?
561 mlx5e_skb_from_cqe_mpwrq_linear :
562 mlx5e_skb_from_cqe_mpwrq_nonlinear;
563 rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params);
564 rq->mpwqe.num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params));
565
566 err = mlx5e_create_rq_umr_mkey(mdev, rq);
567 if (err)
568 goto err_rq_wq_destroy;
569 rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);
570
571 err = mlx5e_rq_alloc_mpwqe_info(rq, c);
572 if (err)
573 goto err_free;
574 break;
575 default:
576 err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
577 &rq->wq_ctrl);
578 if (err)
579 return err;
580
581 rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
582
583 wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);
584
585 rq->wqe.info = rqp->frags_info;
586 rq->wqe.frags =
587 kvzalloc_node((wq_sz << rq->wqe.info.log_num_frags) *
588 sizeof(*rq->wqe.frags),
589 GFP_KERNEL, cpu_to_node(c->cpu));
590 if (!rq->wqe.frags) {
591 err = -ENOMEM;
592 goto err_free;
593 }
594
595 err = mlx5e_init_di_list(rq, params, wq_sz, c->cpu);
596 if (err)
597 goto err_free;
598 rq->post_wqes = mlx5e_post_rx_wqes;
599 rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
600
601#ifdef CONFIG_MLX5_EN_IPSEC
602 if (c->priv->ipsec)
603 rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
604 else
605#endif
606 rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe;
607 if (!rq->handle_rx_cqe) {
608 err = -EINVAL;
609 netdev_err(c->netdev, "RX handler of RQ is not set, err %d\n", err);
610 goto err_free;
611 }
612
613 rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(mdev, params) ?
614 mlx5e_skb_from_cqe_linear :
615 mlx5e_skb_from_cqe_nonlinear;
616 rq->mkey_be = c->mkey_be;
617 }
618
619
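	/* Create a page_pool and register it with the rxq */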
620 pp_params.order = 0;
621 pp_params.flags = 0;
622 pp_params.pool_size = pool_size;
623 pp_params.nid = cpu_to_node(c->cpu);
624 pp_params.dev = c->pdev;
625 pp_params.dma_dir = rq->buff.map_dir;
626
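	/* page_pool can be used even when there is no rq->xdp_prog: since the
	 * pool does not handle DMA mapping here, there is no state to clear,
	 * and it gracefully handles elevated page refcounts.
	 */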
632 rq->page_pool = page_pool_create(&pp_params);
633 if (IS_ERR(rq->page_pool)) {
634 err = PTR_ERR(rq->page_pool);
635 rq->page_pool = NULL;
636 goto err_free;
637 }
638 err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
639 MEM_TYPE_PAGE_POOL, rq->page_pool);
640 if (err)
641 goto err_free;
642
643 for (i = 0; i < wq_sz; i++) {
644 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
645 struct mlx5e_rx_wqe_ll *wqe =
646 mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i);
647 u32 byte_count =
648 rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
649 u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i);
650
651 wqe->data[0].addr = cpu_to_be64(dma_offset + rq->buff.headroom);
652 wqe->data[0].byte_count = cpu_to_be32(byte_count);
653 wqe->data[0].lkey = rq->mkey_be;
654 } else {
655 struct mlx5e_rx_wqe_cyc *wqe =
656 mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i);
657 int f;
658
659 for (f = 0; f < rq->wqe.info.num_frags; f++) {
660 u32 frag_size = rq->wqe.info.arr[f].frag_size |
661 MLX5_HW_START_PADDING;
662
663 wqe->data[f].byte_count = cpu_to_be32(frag_size);
664 wqe->data[f].lkey = rq->mkey_be;
665 }
666
667 if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) {
668 wqe->data[f].byte_count = 0;
669 wqe->data[f].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
670 wqe->data[f].addr = 0;
671 }
672 }
673 }
674
675 INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);
676
677 switch (params->rx_cq_moderation.cq_period_mode) {
678 case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
679 rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE;
680 break;
681 case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
682 default:
683 rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
684 }
685
686 rq->page_cache.head = 0;
687 rq->page_cache.tail = 0;
688
689 return 0;
690
691err_free:
692 switch (rq->wq_type) {
693 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
694 kvfree(rq->mpwqe.info);
695 mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);
696 break;
697 default:
698 kvfree(rq->wqe.frags);
699 mlx5e_free_di_list(rq);
700 }
701
702err_rq_wq_destroy:
703 if (rq->xdp_prog)
704 bpf_prog_put(rq->xdp_prog);
705 xdp_rxq_info_unreg(&rq->xdp_rxq);
706 if (rq->page_pool)
707 page_pool_destroy(rq->page_pool);
708 mlx5_wq_destroy(&rq->wq_ctrl);
709
710 return err;
711}
712
713static void mlx5e_free_rq(struct mlx5e_rq *rq)
714{
715 int i;
716
717 if (rq->xdp_prog)
718 bpf_prog_put(rq->xdp_prog);
719
720 xdp_rxq_info_unreg(&rq->xdp_rxq);
721 if (rq->page_pool)
722 page_pool_destroy(rq->page_pool);
723
724 switch (rq->wq_type) {
725 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
726 kvfree(rq->mpwqe.info);
727 mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
728 break;
729 default:
730 kvfree(rq->wqe.frags);
731 mlx5e_free_di_list(rq);
732 }
733
734 for (i = rq->page_cache.head; i != rq->page_cache.tail;
735 i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
736 struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];
737
738 mlx5e_page_release(rq, dma_info, false);
739 }
740 mlx5_wq_destroy(&rq->wq_ctrl);
741}
742
743static int mlx5e_create_rq(struct mlx5e_rq *rq,
744 struct mlx5e_rq_param *param)
745{
746 struct mlx5_core_dev *mdev = rq->mdev;
747
748 void *in;
749 void *rqc;
750 void *wq;
751 int inlen;
752 int err;
753
754 inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
755 sizeof(u64) * rq->wq_ctrl.buf.npages;
756 in = kvzalloc(inlen, GFP_KERNEL);
757 if (!in)
758 return -ENOMEM;
759
760 rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
761 wq = MLX5_ADDR_OF(rqc, rqc, wq);
762
763 memcpy(rqc, param->rqc, sizeof(param->rqc));
764
765 MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
766 MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
767 MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
768 MLX5_ADAPTER_PAGE_SHIFT);
769 MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
770
771 mlx5_fill_page_frag_array(&rq->wq_ctrl.buf,
772 (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
773
774 err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
775
776 kvfree(in);
777
778 return err;
779}
780
781static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
782 int next_state)
783{
784 struct mlx5_core_dev *mdev = rq->mdev;
785
786 void *in;
787 void *rqc;
788 int inlen;
789 int err;
790
791 inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
792 in = kvzalloc(inlen, GFP_KERNEL);
793 if (!in)
794 return -ENOMEM;
795
796 rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
797
798 MLX5_SET(modify_rq_in, in, rq_state, curr_state);
799 MLX5_SET(rqc, rqc, state, next_state);
800
801 err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
802
803 kvfree(in);
804
805 return err;
806}
807
808static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
809{
810 struct mlx5e_channel *c = rq->channel;
811 struct mlx5e_priv *priv = c->priv;
812 struct mlx5_core_dev *mdev = priv->mdev;
813
814 void *in;
815 void *rqc;
816 int inlen;
817 int err;
818
819 inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
820 in = kvzalloc(inlen, GFP_KERNEL);
821 if (!in)
822 return -ENOMEM;
823
824 rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
825
826 MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
827 MLX5_SET64(modify_rq_in, in, modify_bitmask,
828 MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS);
829 MLX5_SET(rqc, rqc, scatter_fcs, enable);
830 MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
831
832 err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
833
834 kvfree(in);
835
836 return err;
837}
838
839static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
840{
841 struct mlx5e_channel *c = rq->channel;
842 struct mlx5_core_dev *mdev = c->mdev;
843 void *in;
844 void *rqc;
845 int inlen;
846 int err;
847
848 inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
849 in = kvzalloc(inlen, GFP_KERNEL);
850 if (!in)
851 return -ENOMEM;
852
853 rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
854
855 MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
856 MLX5_SET64(modify_rq_in, in, modify_bitmask,
857 MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
858 MLX5_SET(rqc, rqc, vsd, vsd);
859 MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
860
861 err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
862
863 kvfree(in);
864
865 return err;
866}
867
868static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
869{
870 mlx5_core_destroy_rq(rq->mdev, rq->rqn);
871}
872
873static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
874{
875 unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time);
876 struct mlx5e_channel *c = rq->channel;
877
878 u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5e_rqwq_get_size(rq));
879
880 do {
881 if (mlx5e_rqwq_get_cur_sz(rq) >= min_wqes)
882 return 0;
883
884 msleep(20);
885 } while (time_before(jiffies, exp_time));
886
887 netdev_warn(c->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
888 c->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes);
889
890 return -ETIMEDOUT;
891}
892
893static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
894{
895 __be16 wqe_ix_be;
896 u16 wqe_ix;
897
898 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
899 struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
900
901
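		/* An in-progress UMR WQE is always located at wq->head */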
902 if (rq->mpwqe.umr_in_progress)
903 rq->dealloc_wqe(rq, wq->head);
904
905 while (!mlx5_wq_ll_is_empty(wq)) {
906 struct mlx5e_rx_wqe_ll *wqe;
907
908 wqe_ix_be = *wq->tail_next;
909 wqe_ix = be16_to_cpu(wqe_ix_be);
910 wqe = mlx5_wq_ll_get_wqe(wq, wqe_ix);
911 rq->dealloc_wqe(rq, wqe_ix);
912 mlx5_wq_ll_pop(wq, wqe_ix_be,
913 &wqe->next.next_wqe_index);
914 }
915 } else {
916 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
917
918 while (!mlx5_wq_cyc_is_empty(wq)) {
919 wqe_ix = mlx5_wq_cyc_get_tail(wq);
920 rq->dealloc_wqe(rq, wqe_ix);
921 mlx5_wq_cyc_pop(wq);
922 }
923 }
924
925}
926
927static int mlx5e_open_rq(struct mlx5e_channel *c,
928 struct mlx5e_params *params,
929 struct mlx5e_rq_param *param,
930 struct mlx5e_rq *rq)
931{
932 int err;
933
934 err = mlx5e_alloc_rq(c, params, param, rq);
935 if (err)
936 return err;
937
938 err = mlx5e_create_rq(rq, param);
939 if (err)
940 goto err_free_rq;
941
942 err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
943 if (err)
944 goto err_destroy_rq;
945
946 if (MLX5_CAP_ETH(c->mdev, cqe_checksum_full))
947 __set_bit(MLX5E_RQ_STATE_CSUM_FULL, &c->rq.state);
948
949 if (params->rx_dim_enabled)
950 __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
951
952 if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE))
953 __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
954
955 return 0;
956
957err_destroy_rq:
958 mlx5e_destroy_rq(rq);
959err_free_rq:
960 mlx5e_free_rq(rq);
961
962 return err;
963}
964
965static void mlx5e_activate_rq(struct mlx5e_rq *rq)
966{
967 struct mlx5e_icosq *sq = &rq->channel->icosq;
968 struct mlx5_wq_cyc *wq = &sq->wq;
969 struct mlx5e_tx_wqe *nopwqe;
970
971 u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
972
973 set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
974 sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
975 nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
976 mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
977}
978
979static void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
980{
981 clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
982 napi_synchronize(&rq->channel->napi);
983}
984
985static void mlx5e_close_rq(struct mlx5e_rq *rq)
986{
987 cancel_work_sync(&rq->dim.work);
988 mlx5e_destroy_rq(rq);
989 mlx5e_free_rx_descs(rq);
990 mlx5e_free_rq(rq);
991}
992
993static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
994{
995 kvfree(sq->db.xdpi);
996}
997
998static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
999{
1000 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
1001
1002 sq->db.xdpi = kvzalloc_node(array_size(wq_sz, sizeof(*sq->db.xdpi)),
1003 GFP_KERNEL, numa);
1004 if (!sq->db.xdpi) {
1005 mlx5e_free_xdpsq_db(sq);
1006 return -ENOMEM;
1007 }
1008
1009 return 0;
1010}
1011
1012static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
1013 struct mlx5e_params *params,
1014 struct mlx5e_sq_param *param,
1015 struct mlx5e_xdpsq *sq,
1016 bool is_redirect)
1017{
1018 void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
1019 struct mlx5_core_dev *mdev = c->mdev;
1020 struct mlx5_wq_cyc *wq = &sq->wq;
1021 int err;
1022
1023 sq->pdev = c->pdev;
1024 sq->mkey_be = c->mkey_be;
1025 sq->channel = c;
1026 sq->uar_map = mdev->mlx5e_res.bfreg.map;
1027 sq->min_inline_mode = params->tx_min_inline_mode;
1028 sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
1029 sq->stats = is_redirect ?
1030 &c->priv->channel_stats[c->ix].xdpsq :
1031 &c->priv->channel_stats[c->ix].rq_xdpsq;
1032
1033 param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
1035 if (err)
1036 return err;
1037 wq->db = &wq->db[MLX5_SND_DBR];
1038
1039 err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
1040 if (err)
1041 goto err_sq_wq_destroy;
1042
1043 return 0;
1044
1045err_sq_wq_destroy:
1046 mlx5_wq_destroy(&sq->wq_ctrl);
1047
1048 return err;
1049}
1050
1051static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
1052{
1053 mlx5e_free_xdpsq_db(sq);
1054 mlx5_wq_destroy(&sq->wq_ctrl);
1055}
1056
1057static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
1058{
1059 kvfree(sq->db.ico_wqe);
1060}
1061
1062static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
1063{
1064 u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
1065
1066 sq->db.ico_wqe = kvzalloc_node(array_size(wq_sz,
1067 sizeof(*sq->db.ico_wqe)),
1068 GFP_KERNEL, numa);
1069 if (!sq->db.ico_wqe)
1070 return -ENOMEM;
1071
1072 return 0;
1073}
1074
1075static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
1076 struct mlx5e_sq_param *param,
1077 struct mlx5e_icosq *sq)
1078{
1079 void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
1080 struct mlx5_core_dev *mdev = c->mdev;
1081 struct mlx5_wq_cyc *wq = &sq->wq;
1082 int err;
1083
1084 sq->channel = c;
1085 sq->uar_map = mdev->mlx5e_res.bfreg.map;
1086
1087 param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
1089 if (err)
1090 return err;
1091 wq->db = &wq->db[MLX5_SND_DBR];
1092
1093 err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
1094 if (err)
1095 goto err_sq_wq_destroy;
1096
1097 return 0;
1098
1099err_sq_wq_destroy:
1100 mlx5_wq_destroy(&sq->wq_ctrl);
1101
1102 return err;
1103}
1104
1105static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
1106{
1107 mlx5e_free_icosq_db(sq);
1108 mlx5_wq_destroy(&sq->wq_ctrl);
1109}
1110
1111static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
1112{
1113 kvfree(sq->db.wqe_info);
1114 kvfree(sq->db.dma_fifo);
1115}
1116
1117static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
1118{
1119 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
1120 int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
1121
1122 sq->db.dma_fifo = kvzalloc_node(array_size(df_sz,
1123 sizeof(*sq->db.dma_fifo)),
1124 GFP_KERNEL, numa);
1125 sq->db.wqe_info = kvzalloc_node(array_size(wq_sz,
1126 sizeof(*sq->db.wqe_info)),
1127 GFP_KERNEL, numa);
1128 if (!sq->db.dma_fifo || !sq->db.wqe_info) {
1129 mlx5e_free_txqsq_db(sq);
1130 return -ENOMEM;
1131 }
1132
1133 sq->dma_fifo_mask = df_sz - 1;
1134
1135 return 0;
1136}
1137
1138static void mlx5e_sq_recover(struct work_struct *work);
1139static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
1140 int txq_ix,
1141 struct mlx5e_params *params,
1142 struct mlx5e_sq_param *param,
1143 struct mlx5e_txqsq *sq,
1144 int tc)
1145{
1146 void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
1147 struct mlx5_core_dev *mdev = c->mdev;
1148 struct mlx5_wq_cyc *wq = &sq->wq;
1149 int err;
1150
1151 sq->pdev = c->pdev;
1152 sq->tstamp = c->tstamp;
1153 sq->clock = &mdev->clock;
1154 sq->mkey_be = c->mkey_be;
1155 sq->channel = c;
1156 sq->ch_ix = c->ix;
1157 sq->txq_ix = txq_ix;
1158 sq->uar_map = mdev->mlx5e_res.bfreg.map;
1159 sq->min_inline_mode = params->tx_min_inline_mode;
1160 sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
1161 INIT_WORK(&sq->recover.recover_work, mlx5e_sq_recover);
1162 if (MLX5_IPSEC_DEV(c->priv->mdev))
1163 set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
1164 if (mlx5_accel_is_tls_device(c->priv->mdev))
1165 set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
1166
1167 param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
1169 if (err)
1170 return err;
1171 wq->db = &wq->db[MLX5_SND_DBR];
1172
1173 err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
1174 if (err)
1175 goto err_sq_wq_destroy;
1176
1177 INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work);
1178 sq->dim.mode = params->tx_cq_moderation.cq_period_mode;
1179
1180 return 0;
1181
1182err_sq_wq_destroy:
1183 mlx5_wq_destroy(&sq->wq_ctrl);
1184
1185 return err;
1186}
1187
1188static void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
1189{
1190 mlx5e_free_txqsq_db(sq);
1191 mlx5_wq_destroy(&sq->wq_ctrl);
1192}
1193
1194struct mlx5e_create_sq_param {
1195 struct mlx5_wq_ctrl *wq_ctrl;
1196 u32 cqn;
1197 u32 tisn;
1198 u8 tis_lst_sz;
1199 u8 min_inline_mode;
1200};
1201
1202static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
1203 struct mlx5e_sq_param *param,
1204 struct mlx5e_create_sq_param *csp,
1205 u32 *sqn)
1206{
1207 void *in;
1208 void *sqc;
1209 void *wq;
1210 int inlen;
1211 int err;
1212
1213 inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
1214 sizeof(u64) * csp->wq_ctrl->buf.npages;
1215 in = kvzalloc(inlen, GFP_KERNEL);
1216 if (!in)
1217 return -ENOMEM;
1218
1219 sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
1220 wq = MLX5_ADDR_OF(sqc, sqc, wq);
1221
1222 memcpy(sqc, param->sqc, sizeof(param->sqc));
1223 MLX5_SET(sqc, sqc, tis_lst_sz, csp->tis_lst_sz);
1224 MLX5_SET(sqc, sqc, tis_num_0, csp->tisn);
1225 MLX5_SET(sqc, sqc, cqn, csp->cqn);
1226
1227 if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
1228 MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode);
1229
1230 MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
1231 MLX5_SET(sqc, sqc, flush_in_error_en, 1);
1232
1233 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
1234 MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.bfreg.index);
1235 MLX5_SET(wq, wq, log_wq_pg_sz, csp->wq_ctrl->buf.page_shift -
1236 MLX5_ADAPTER_PAGE_SHIFT);
1237 MLX5_SET64(wq, wq, dbr_addr, csp->wq_ctrl->db.dma);
1238
1239 mlx5_fill_page_frag_array(&csp->wq_ctrl->buf,
1240 (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
1241
1242 err = mlx5_core_create_sq(mdev, in, inlen, sqn);
1243
1244 kvfree(in);
1245
1246 return err;
1247}
1248
1249struct mlx5e_modify_sq_param {
1250 int curr_state;
1251 int next_state;
1252 bool rl_update;
1253 int rl_index;
1254};
1255
1256static int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
1257 struct mlx5e_modify_sq_param *p)
1258{
1259 void *in;
1260 void *sqc;
1261 int inlen;
1262 int err;
1263
1264 inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
1265 in = kvzalloc(inlen, GFP_KERNEL);
1266 if (!in)
1267 return -ENOMEM;
1268
1269 sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
1270
1271 MLX5_SET(modify_sq_in, in, sq_state, p->curr_state);
1272 MLX5_SET(sqc, sqc, state, p->next_state);
1273 if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) {
1274 MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
1275 MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index);
1276 }
1277
1278 err = mlx5_core_modify_sq(mdev, sqn, in, inlen);
1279
1280 kvfree(in);
1281
1282 return err;
1283}
1284
1285static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
1286{
1287 mlx5_core_destroy_sq(mdev, sqn);
1288}
1289
1290static int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
1291 struct mlx5e_sq_param *param,
1292 struct mlx5e_create_sq_param *csp,
1293 u32 *sqn)
1294{
1295 struct mlx5e_modify_sq_param msp = {0};
1296 int err;
1297
1298 err = mlx5e_create_sq(mdev, param, csp, sqn);
1299 if (err)
1300 return err;
1301
1302 msp.curr_state = MLX5_SQC_STATE_RST;
1303 msp.next_state = MLX5_SQC_STATE_RDY;
1304 err = mlx5e_modify_sq(mdev, *sqn, &msp);
1305 if (err)
1306 mlx5e_destroy_sq(mdev, *sqn);
1307
1308 return err;
1309}
1310
1311static int mlx5e_set_sq_maxrate(struct net_device *dev,
1312 struct mlx5e_txqsq *sq, u32 rate);
1313
1314static int mlx5e_open_txqsq(struct mlx5e_channel *c,
1315 u32 tisn,
1316 int txq_ix,
1317 struct mlx5e_params *params,
1318 struct mlx5e_sq_param *param,
1319 struct mlx5e_txqsq *sq,
1320 int tc)
1321{
1322 struct mlx5e_create_sq_param csp = {};
1323 u32 tx_rate;
1324 int err;
1325
1326 err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq, tc);
1327 if (err)
1328 return err;
1329
1330 csp.tisn = tisn;
1331 csp.tis_lst_sz = 1;
1332 csp.cqn = sq->cq.mcq.cqn;
1333 csp.wq_ctrl = &sq->wq_ctrl;
1334 csp.min_inline_mode = sq->min_inline_mode;
1335 err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
1336 if (err)
1337 goto err_free_txqsq;
1338
1339 tx_rate = c->priv->tx_rates[sq->txq_ix];
1340 if (tx_rate)
1341 mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);
1342
1343 if (params->tx_dim_enabled)
1344 sq->state |= BIT(MLX5E_SQ_STATE_AM);
1345
1346 return 0;
1347
1348err_free_txqsq:
1349 clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1350 mlx5e_free_txqsq(sq);
1351
1352 return err;
1353}
1354
1355static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq)
1356{
1357 WARN_ONCE(sq->cc != sq->pc,
1358 "SQ 0x%x: cc (0x%x) != pc (0x%x)\n",
1359 sq->sqn, sq->cc, sq->pc);
1360 sq->cc = 0;
1361 sq->dma_fifo_cc = 0;
1362 sq->pc = 0;
1363}
1364
1365static void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
1366{
1367 sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
1368 clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
1369 set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1370 netdev_tx_reset_queue(sq->txq);
1371 netif_tx_start_queue(sq->txq);
1372}
1373
1374static inline void netif_tx_disable_queue(struct netdev_queue *txq)
1375{
1376 __netif_tx_lock_bh(txq);
1377 netif_tx_stop_queue(txq);
1378 __netif_tx_unlock_bh(txq);
1379}
1380
1381static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
1382{
1383 struct mlx5e_channel *c = sq->channel;
1384 struct mlx5_wq_cyc *wq = &sq->wq;
1385
1386 clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1387
1388 napi_synchronize(&c->napi);
1389
1390 netif_tx_disable_queue(sq->txq);
1391
1392
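	/* Post a NOP so the last doorbell goes out before the SQ is destroyed */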
1393 if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
1394 u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
1395 struct mlx5e_tx_wqe *nop;
1396
1397 sq->db.wqe_info[pi].skb = NULL;
1398 nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
1399 mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl);
1400 }
1401}
1402
1403static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
1404{
1405 struct mlx5e_channel *c = sq->channel;
1406 struct mlx5_core_dev *mdev = c->mdev;
1407 struct mlx5_rate_limit rl = {0};
1408
1409 cancel_work_sync(&sq->dim.work);
1410 mlx5e_destroy_sq(mdev, sq->sqn);
1411 if (sq->rate_limit) {
1412 rl.rate = sq->rate_limit;
1413 mlx5_rl_remove_rate(mdev, &rl);
1414 }
1415 mlx5e_free_txqsq_descs(sq);
1416 mlx5e_free_txqsq(sq);
1417}
1418
1419static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
1420{
1421 unsigned long exp_time = jiffies + msecs_to_jiffies(2000);
1422
1423 while (time_before(jiffies, exp_time)) {
1424 if (sq->cc == sq->pc)
1425 return 0;
1426
1427 msleep(20);
1428 }
1429
1430 netdev_err(sq->channel->netdev,
1431 "Wait for SQ 0x%x flush timeout (sq cc = 0x%x, sq pc = 0x%x)\n",
1432 sq->sqn, sq->cc, sq->pc);
1433
1434 return -ETIMEDOUT;
1435}
1436
1437static int mlx5e_sq_to_ready(struct mlx5e_txqsq *sq, int curr_state)
1438{
1439 struct mlx5_core_dev *mdev = sq->channel->mdev;
1440 struct net_device *dev = sq->channel->netdev;
1441 struct mlx5e_modify_sq_param msp = {0};
1442 int err;
1443
1444 msp.curr_state = curr_state;
1445 msp.next_state = MLX5_SQC_STATE_RST;
1446
1447 err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
1448 if (err) {
1449 netdev_err(dev, "Failed to move sq 0x%x to reset\n", sq->sqn);
1450 return err;
1451 }
1452
1453 memset(&msp, 0, sizeof(msp));
1454 msp.curr_state = MLX5_SQC_STATE_RST;
1455 msp.next_state = MLX5_SQC_STATE_RDY;
1456
1457 err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
1458 if (err) {
1459 netdev_err(dev, "Failed to move sq 0x%x to ready\n", sq->sqn);
1460 return err;
1461 }
1462
1463 return 0;
1464}
1465
1466static void mlx5e_sq_recover(struct work_struct *work)
1467{
1468 struct mlx5e_txqsq_recover *recover =
1469 container_of(work, struct mlx5e_txqsq_recover,
1470 recover_work);
1471 struct mlx5e_txqsq *sq = container_of(recover, struct mlx5e_txqsq,
1472 recover);
1473 struct mlx5_core_dev *mdev = sq->channel->mdev;
1474 struct net_device *dev = sq->channel->netdev;
1475 u8 state;
1476 int err;
1477
1478 err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);
1479 if (err) {
1480 netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n",
1481 sq->sqn, err);
1482 return;
1483 }
1484
	if (state != MLX5_SQC_STATE_ERR) {
1486 netdev_err(dev, "SQ 0x%x not in ERROR state\n", sq->sqn);
1487 return;
1488 }
1489
1490 netif_tx_disable_queue(sq->txq);
1491
1492 if (mlx5e_wait_for_sq_flush(sq))
1493 return;
1494
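	/* If the interval between two consecutive recoveries of this SQ is too
	 * short, don't recover, to avoid an infinite loop of ERR_CQE -> recover.
	 * Reaching this state likely indicates a bug; keep the queue closed and
	 * let the TX timeout handler clean up.
	 */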
1500 if (jiffies_to_msecs(jiffies - recover->last_recover) <
1501 MLX5E_SQ_RECOVER_MIN_INTERVAL) {
1502 netdev_err(dev, "Recover SQ 0x%x canceled, too many error CQEs\n",
1503 sq->sqn);
1504 return;
1505 }
1506
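	/* At this point no new packets arrive from the stack, as the TXQ is
	 * marked with QUEUE_STATE_DRV_XOFF, and NAPI has cleared all pending
	 * WQEs, so the SQ can safely be reset.
	 */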
1511 if (mlx5e_sq_to_ready(sq, state))
1512 return;
1513
1514 mlx5e_reset_txqsq_cc_pc(sq);
1515 sq->stats->recover++;
1516 recover->last_recover = jiffies;
1517 mlx5e_activate_txqsq(sq);
1518}
1519
1520static int mlx5e_open_icosq(struct mlx5e_channel *c,
1521 struct mlx5e_params *params,
1522 struct mlx5e_sq_param *param,
1523 struct mlx5e_icosq *sq)
1524{
1525 struct mlx5e_create_sq_param csp = {};
1526 int err;
1527
1528 err = mlx5e_alloc_icosq(c, param, sq);
1529 if (err)
1530 return err;
1531
1532 csp.cqn = sq->cq.mcq.cqn;
1533 csp.wq_ctrl = &sq->wq_ctrl;
1534 csp.min_inline_mode = params->tx_min_inline_mode;
1535 set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1536 err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
1537 if (err)
1538 goto err_free_icosq;
1539
1540 return 0;
1541
1542err_free_icosq:
1543 clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1544 mlx5e_free_icosq(sq);
1545
1546 return err;
1547}
1548
1549static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
1550{
1551 struct mlx5e_channel *c = sq->channel;
1552
1553 clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1554 napi_synchronize(&c->napi);
1555
1556 mlx5e_destroy_sq(c->mdev, sq->sqn);
1557 mlx5e_free_icosq(sq);
1558}
1559
1560static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
1561 struct mlx5e_params *params,
1562 struct mlx5e_sq_param *param,
1563 struct mlx5e_xdpsq *sq,
1564 bool is_redirect)
1565{
1566 unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
1567 struct mlx5e_create_sq_param csp = {};
1568 unsigned int inline_hdr_sz = 0;
1569 int err;
1570 int i;
1571
1572 err = mlx5e_alloc_xdpsq(c, params, param, sq, is_redirect);
1573 if (err)
1574 return err;
1575
1576 csp.tis_lst_sz = 1;
1577 csp.tisn = c->priv->tisn[c->lag_port][0];
1578 csp.cqn = sq->cq.mcq.cqn;
1579 csp.wq_ctrl = &sq->wq_ctrl;
1580 csp.min_inline_mode = sq->min_inline_mode;
1581 if (is_redirect)
1582 set_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state);
1583 set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1584 err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
1585 if (err)
1586 goto err_free_xdpsq;
1587
1588 if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
1589 inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
1590 ds_cnt++;
1591 }
1592
1593
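	/* Pre-initialize the fixed WQE fields */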
1594 for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
1595 struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i);
1596 struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
1597 struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
1598 struct mlx5_wqe_data_seg *dseg;
1599
1600 cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
1601 eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
1602
1603 dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
1604 dseg->lkey = sq->mkey_be;
1605 }
1606
1607 return 0;
1608
1609err_free_xdpsq:
1610 clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1611 mlx5e_free_xdpsq(sq);
1612
1613 return err;
1614}
1615
1616static void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
1617{
1618 struct mlx5e_channel *c = sq->channel;
1619
1620 clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1621 napi_synchronize(&c->napi);
1622
1623 mlx5e_destroy_sq(c->mdev, sq->sqn);
1624 mlx5e_free_xdpsq_descs(sq);
1625 mlx5e_free_xdpsq(sq);
1626}
1627
1628static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
1629 struct mlx5e_cq_param *param,
1630 struct mlx5e_cq *cq)
1631{
1632 struct mlx5_core_cq *mcq = &cq->mcq;
1633 int eqn_not_used;
1634 unsigned int irqn;
1635 int err;
1636 u32 i;
1637
1638 err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
1639 if (err)
1640 return err;
1641
	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
1643 &cq->wq_ctrl);
1644 if (err)
1645 return err;
1646
1647 mcq->cqe_sz = 64;
1648 mcq->set_ci_db = cq->wq_ctrl.db.db;
1649 mcq->arm_db = cq->wq_ctrl.db.db + 1;
1650 *mcq->set_ci_db = 0;
1651 *mcq->arm_db = 0;
1652 mcq->vector = param->eq_ix;
1653 mcq->comp = mlx5e_completion_event;
1654 mcq->event = mlx5e_cq_error_event;
1655 mcq->irqn = irqn;
1656
1657 for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
1658 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
1659
1660 cqe->op_own = 0xf1;
1661 }
1662
1663 cq->mdev = mdev;
1664
1665 return 0;
1666}
1667
1668static int mlx5e_alloc_cq(struct mlx5e_channel *c,
1669 struct mlx5e_cq_param *param,
1670 struct mlx5e_cq *cq)
1671{
1672 struct mlx5_core_dev *mdev = c->priv->mdev;
1673 int err;
1674
1675 param->wq.buf_numa_node = cpu_to_node(c->cpu);
1676 param->wq.db_numa_node = cpu_to_node(c->cpu);
1677 param->eq_ix = c->ix;
1678
1679 err = mlx5e_alloc_cq_common(mdev, param, cq);
1680
1681 cq->napi = &c->napi;
1682 cq->channel = c;
1683
1684 return err;
1685}
1686
1687static void mlx5e_free_cq(struct mlx5e_cq *cq)
1688{
1689 mlx5_wq_destroy(&cq->wq_ctrl);
1690}
1691
1692static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
1693{
1694 struct mlx5_core_dev *mdev = cq->mdev;
1695 struct mlx5_core_cq *mcq = &cq->mcq;
1696
1697 void *in;
1698 void *cqc;
1699 int inlen;
1700 unsigned int irqn_not_used;
1701 int eqn;
1702 int err;
1703
1704 err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
1705 if (err)
1706 return err;
1707
1708 inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
1709 sizeof(u64) * cq->wq_ctrl.buf.npages;
1710 in = kvzalloc(inlen, GFP_KERNEL);
1711 if (!in)
1712 return -ENOMEM;
1713
1714 cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
1715
1716 memcpy(cqc, param->cqc, sizeof(param->cqc));
1717
1718 mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
1719 (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
1720
1721 MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
1722 MLX5_SET(cqc, cqc, c_eqn, eqn);
1723 MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
1724 MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
1725 MLX5_ADAPTER_PAGE_SHIFT);
1726 MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
1727
1728 err = mlx5_core_create_cq(mdev, mcq, in, inlen);
1729
1730 kvfree(in);
1731
1732 if (err)
1733 return err;
1734
1735 mlx5e_cq_arm(cq);
1736
1737 return 0;
1738}
1739
1740static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
1741{
1742 mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
1743}
1744
1745static int mlx5e_open_cq(struct mlx5e_channel *c,
1746 struct net_dim_cq_moder moder,
1747 struct mlx5e_cq_param *param,
1748 struct mlx5e_cq *cq)
1749{
1750 struct mlx5_core_dev *mdev = c->mdev;
1751 int err;
1752
1753 err = mlx5e_alloc_cq(c, param, cq);
1754 if (err)
1755 return err;
1756
1757 err = mlx5e_create_cq(cq, param);
1758 if (err)
1759 goto err_free_cq;
1760
1761 if (MLX5_CAP_GEN(mdev, cq_moderation))
1762 mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts);
1763 return 0;
1764
1765err_free_cq:
1766 mlx5e_free_cq(cq);
1767
1768 return err;
1769}
1770
1771static void mlx5e_close_cq(struct mlx5e_cq *cq)
1772{
1773 mlx5e_destroy_cq(cq);
1774 mlx5e_free_cq(cq);
1775}
1776
1777static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
1778 struct mlx5e_params *params,
1779 struct mlx5e_channel_param *cparam)
1780{
1781 int err;
1782 int tc;
1783
1784 for (tc = 0; tc < c->num_tc; tc++) {
1785 err = mlx5e_open_cq(c, params->tx_cq_moderation,
1786 &cparam->tx_cq, &c->sq[tc].cq);
1787 if (err)
1788 goto err_close_tx_cqs;
1789 }
1790
1791 return 0;
1792
1793err_close_tx_cqs:
1794 for (tc--; tc >= 0; tc--)
1795 mlx5e_close_cq(&c->sq[tc].cq);
1796
1797 return err;
1798}
1799
1800static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
1801{
1802 int tc;
1803
1804 for (tc = 0; tc < c->num_tc; tc++)
1805 mlx5e_close_cq(&c->sq[tc].cq);
1806}
1807
1808static int mlx5e_open_sqs(struct mlx5e_channel *c,
1809 struct mlx5e_params *params,
1810 struct mlx5e_channel_param *cparam)
1811{
1812 struct mlx5e_priv *priv = c->priv;
1813 int err, tc, max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
1814
1815 for (tc = 0; tc < params->num_tc; tc++) {
1816 int txq_ix = c->ix + tc * max_nch;
1817
1818 err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
1819 params, &cparam->sq, &c->sq[tc], tc);
1820 if (err)
1821 goto err_close_sqs;
1822 }
1823
1824 return 0;
1825
1826err_close_sqs:
1827 for (tc--; tc >= 0; tc--)
1828 mlx5e_close_txqsq(&c->sq[tc]);
1829
1830 return err;
1831}
1832
1833static void mlx5e_close_sqs(struct mlx5e_channel *c)
1834{
1835 int tc;
1836
1837 for (tc = 0; tc < c->num_tc; tc++)
1838 mlx5e_close_txqsq(&c->sq[tc]);
1839}
1840
1841static int mlx5e_set_sq_maxrate(struct net_device *dev,
1842 struct mlx5e_txqsq *sq, u32 rate)
1843{
1844 struct mlx5e_priv *priv = netdev_priv(dev);
1845 struct mlx5_core_dev *mdev = priv->mdev;
1846 struct mlx5e_modify_sq_param msp = {0};
1847 struct mlx5_rate_limit rl = {0};
1848 u16 rl_index = 0;
1849 int err;
1850
1851 if (rate == sq->rate_limit)
1852
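		/* nothing to do */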
1853 return 0;
1854
1855 if (sq->rate_limit) {
1856 rl.rate = sq->rate_limit;
1857
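		/* remove the current rate to free the HW entry for the new one */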
1858 mlx5_rl_remove_rate(mdev, &rl);
1859 }
1860
1861 sq->rate_limit = 0;
1862
1863 if (rate) {
1864 rl.rate = rate;
1865 err = mlx5_rl_add_rate(mdev, &rl_index, &rl);
1866 if (err) {
1867 netdev_err(dev, "Failed configuring rate %u: %d\n",
1868 rate, err);
1869 return err;
1870 }
1871 }
1872
1873 msp.curr_state = MLX5_SQC_STATE_RDY;
1874 msp.next_state = MLX5_SQC_STATE_RDY;
1875 msp.rl_index = rl_index;
1876 msp.rl_update = true;
1877 err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
1878 if (err) {
1879 netdev_err(dev, "Failed configuring rate %u: %d\n",
1880 rate, err);
1881
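		/* roll back: remove the newly added rate from the HW table */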
1882 if (rate)
1883 mlx5_rl_remove_rate(mdev, &rl);
1884 return err;
1885 }
1886
1887 sq->rate_limit = rate;
1888 return 0;
1889}
1890
1891static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
1892{
1893 struct mlx5e_priv *priv = netdev_priv(dev);
1894 struct mlx5_core_dev *mdev = priv->mdev;
1895 struct mlx5e_txqsq *sq = priv->txq2sq[index];
1896 int err = 0;
1897
1898 if (!mlx5_rl_is_supported(mdev)) {
1899 netdev_err(dev, "Rate limiting is not supported on this device\n");
1900 return -EINVAL;
1901 }
1902
1903
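	/* rate is given in Mb/sec, HW config is in Kb/sec */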
1904 rate = rate << 10;
1905
1906
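	/* rate 0 (unlimited) is always valid; otherwise it must be in the HW supported range */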
1907 if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
1908 netdev_err(dev, "TX rate %u, is not in range\n", rate);
1909 return -ERANGE;
1910 }
1911
1912 mutex_lock(&priv->state_lock);
1913 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
1914 err = mlx5e_set_sq_maxrate(dev, sq, rate);
1915 if (!err)
1916 priv->tx_rates[index] = rate;
1917 mutex_unlock(&priv->state_lock);
1918
1919 return err;
1920}
1921
1922static u8 mlx5e_enumerate_lag_port(struct mlx5_core_dev *mdev, int ix)
1923{
1924 u16 port_aff_bias = mlx5_core_is_pf(mdev) ? 0 : MLX5_CAP_GEN(mdev, vhca_id);
1925
1926 return (ix + port_aff_bias) % mlx5e_get_num_lag_ports(mdev);
1927}
1928
1929static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
1930 struct mlx5e_params *params,
1931 struct mlx5e_channel_param *cparam,
1932 struct mlx5e_channel **cp)
1933{
1934 int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, ix));
1935 struct net_dim_cq_moder icocq_moder = {0, 0};
1936 struct net_device *netdev = priv->netdev;
1937 struct mlx5e_channel *c;
1938 unsigned int irq;
1939 int err;
1940 int eqn;
1941
1942 err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
1943 if (err)
1944 return err;
1945
1946 c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
1947 if (!c)
1948 return -ENOMEM;
1949
1950 c->priv = priv;
1951 c->mdev = priv->mdev;
1952 c->tstamp = &priv->tstamp;
1953 c->ix = ix;
1954 c->cpu = cpu;
1955 c->pdev = &priv->mdev->pdev->dev;
1956 c->netdev = priv->netdev;
1957 c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
1958 c->num_tc = params->num_tc;
1959 c->xdp = !!params->xdp_prog;
1960 c->stats = &priv->channel_stats[ix].ch;
1961
	c->irq_desc = irq_to_desc(irq);
1965 c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix);
1966
1967 netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
1968
1969 err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq);
1970 if (err)
1971 goto err_napi_del;
1972
1973 err = mlx5e_open_tx_cqs(c, params, cparam);
1974 if (err)
1975 goto err_close_icosq_cq;
1976
1977 err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->tx_cq, &c->xdpsq.cq);
1978 if (err)
1979 goto err_close_tx_cqs;
1980
1981 err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq);
1982 if (err)
1983 goto err_close_xdp_tx_cqs;
1984
1985
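	/* XDP SQ CQ params are the same as the regular TXQ SQ CQ params */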
1986 err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation,
1987 &cparam->tx_cq, &c->rq.xdpsq.cq) : 0;
1988 if (err)
1989 goto err_close_rx_cq;
1990
1991 napi_enable(&c->napi);
1992
1993 err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
1994 if (err)
1995 goto err_disable_napi;
1996
1997 err = mlx5e_open_sqs(c, params, cparam);
1998 if (err)
1999 goto err_close_icosq;
2000
2001 err = c->xdp ? mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq, false) : 0;
2002 if (err)
2003 goto err_close_sqs;
2004
2005 err = mlx5e_open_rq(c, params, &cparam->rq, &c->rq);
2006 if (err)
2007 goto err_close_xdp_sq;
2008
2009 err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->xdpsq, true);
2010 if (err)
2011 goto err_close_rq;
2012
2013 *cp = c;
2014
2015 return 0;
2016
2017err_close_rq:
2018 mlx5e_close_rq(&c->rq);
2019
2020err_close_xdp_sq:
2021 if (c->xdp)
2022 mlx5e_close_xdpsq(&c->rq.xdpsq);
2023
2024err_close_sqs:
2025 mlx5e_close_sqs(c);
2026
2027err_close_icosq:
2028 mlx5e_close_icosq(&c->icosq);
2029
2030err_disable_napi:
2031 napi_disable(&c->napi);
2032 if (c->xdp)
2033 mlx5e_close_cq(&c->rq.xdpsq.cq);
2034
2035err_close_rx_cq:
2036 mlx5e_close_cq(&c->rq.cq);
2037
2038err_close_xdp_tx_cqs:
2039 mlx5e_close_cq(&c->xdpsq.cq);
2040
2041err_close_tx_cqs:
2042 mlx5e_close_tx_cqs(c);
2043
2044err_close_icosq_cq:
2045 mlx5e_close_cq(&c->icosq.cq);
2046
2047err_napi_del:
2048 netif_napi_del(&c->napi);
2049 kvfree(c);
2050
2051 return err;
2052}
2053
2054static void mlx5e_activate_channel(struct mlx5e_channel *c)
2055{
2056 int tc;
2057
2058 for (tc = 0; tc < c->num_tc; tc++)
2059 mlx5e_activate_txqsq(&c->sq[tc]);
2060 mlx5e_activate_rq(&c->rq);
2061 netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix);
2062}
2063
2064static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
2065{
2066 int tc;
2067
2068 mlx5e_deactivate_rq(&c->rq);
2069 for (tc = 0; tc < c->num_tc; tc++)
2070 mlx5e_deactivate_txqsq(&c->sq[tc]);
2071}
2072
2073static void mlx5e_close_channel(struct mlx5e_channel *c)
2074{
2075 mlx5e_close_xdpsq(&c->xdpsq);
2076 mlx5e_close_rq(&c->rq);
2077 if (c->xdp)
2078 mlx5e_close_xdpsq(&c->rq.xdpsq);
2079 mlx5e_close_sqs(c);
2080 mlx5e_close_icosq(&c->icosq);
2081 napi_disable(&c->napi);
2082 if (c->xdp)
2083 mlx5e_close_cq(&c->rq.xdpsq.cq);
2084 mlx5e_close_cq(&c->rq.cq);
2085 mlx5e_close_cq(&c->xdpsq.cq);
2086 mlx5e_close_tx_cqs(c);
2087 mlx5e_close_cq(&c->icosq.cq);
2088 netif_napi_del(&c->napi);
2089
2090 kvfree(c);
2091}
2092
2093#define DEFAULT_FRAG_SIZE (2048)
2094
2095static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
2096 struct mlx5e_params *params,
2097 struct mlx5e_rq_frags_info *info)
2098{
2099 u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
2100 int frag_size_max = DEFAULT_FRAG_SIZE;
2101 u32 buf_size = 0;
2102 int i;
2103
2104#ifdef CONFIG_MLX5_EN_IPSEC
2105 if (MLX5_IPSEC_DEV(mdev))
2106 byte_count += MLX5E_METADATA_ETHER_LEN;
2107#endif
2108
2109 if (mlx5e_rx_is_linear_skb(mdev, params)) {
2110 int frag_stride;
2111
2112 frag_stride = mlx5e_rx_get_linear_frag_sz(params);
2113 frag_stride = roundup_pow_of_two(frag_stride);
2114
2115 info->arr[0].frag_size = byte_count;
2116 info->arr[0].frag_stride = frag_stride;
2117 info->num_frags = 1;
2118 info->wqe_bulk = PAGE_SIZE / frag_stride;
2119 goto out;
2120 }
2121
2122 if (byte_count > PAGE_SIZE +
2123 (MLX5E_MAX_RX_FRAGS - 1) * frag_size_max)
2124 frag_size_max = PAGE_SIZE;
2125
2126 i = 0;
2127 while (buf_size < byte_count) {
2128 int frag_size = byte_count - buf_size;
2129
2130 if (i < MLX5E_MAX_RX_FRAGS - 1)
2131 frag_size = min(frag_size, frag_size_max);
2132
2133 info->arr[i].frag_size = frag_size;
2134 info->arr[i].frag_stride = roundup_pow_of_two(frag_size);
2135
2136 buf_size += frag_size;
2137 i++;
2138 }
2139 info->num_frags = i;
2140
2141 info->wqe_bulk = 1 + (info->num_frags % 2);
2142
2143out:
2144 info->wqe_bulk = max_t(u8, info->wqe_bulk, 8);
2145 info->log_num_frags = order_base_2(info->num_frags);
2146}
2147
2148static inline u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
2149{
2150 int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs;
2151
2152 switch (wq_type) {
2153 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
2154 sz += sizeof(struct mlx5e_rx_wqe_ll);
2155 break;
2156 default:
2157 sz += sizeof(struct mlx5e_rx_wqe_cyc);
2158 }
2159
2160 return order_base_2(sz);
2161}
2162
2163static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
2164 struct mlx5e_params *params,
2165 struct mlx5e_rq_param *param)
2166{
2167 struct mlx5_core_dev *mdev = priv->mdev;
2168 void *rqc = param->rqc;
2169 void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
2170 int ndsegs = 1;
2171
2172 switch (params->rq_wq_type) {
2173 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
2174 MLX5_SET(wq, wq, log_wqe_num_of_strides,
2175 mlx5e_mpwqe_get_log_num_strides(mdev, params) -
2176 MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
2177 MLX5_SET(wq, wq, log_wqe_stride_size,
2178 mlx5e_mpwqe_get_log_stride_size(mdev, params) -
2179 MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
2180 MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params));
2181 break;
2182 default:
2183 MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
2184 mlx5e_build_rq_frags_info(mdev, params, &param->frags_info);
2185 ndsegs = param->frags_info.num_frags;
2186 }
2187
2188 MLX5_SET(wq, wq, wq_type, params->rq_wq_type);
2189 MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
2190 MLX5_SET(wq, wq, log_wq_stride,
2191 mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
2192 MLX5_SET(wq, wq, pd, mdev->mlx5e_res.pdn);
2193 MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
2194 MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
2195 MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);
2196
2197 param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
2198}
2199
2200static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
2201 struct mlx5e_rq_param *param)
2202{
2203 struct mlx5_core_dev *mdev = priv->mdev;
2204 void *rqc = param->rqc;
2205 void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
2206
2207 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
2208 MLX5_SET(wq, wq, log_wq_stride,
2209 mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
2210 MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter);
2211
2212 param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
2213}
2214
2215static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
2216 struct mlx5e_sq_param *param)
2217{
2218 void *sqc = param->sqc;
2219 void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
2220
2221 MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
2222 MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
2223
2224 param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
2225}
2226
2227static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
2228 struct mlx5e_params *params,
2229 struct mlx5e_sq_param *param)
2230{
2231 void *sqc = param->sqc;
2232 void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
2233
2234 mlx5e_build_sq_param_common(priv, param);
2235 MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
2236 MLX5_SET(sqc, sqc, allow_swp, !!MLX5_IPSEC_DEV(priv->mdev));
2237}
2238
2239static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
2240 struct mlx5e_cq_param *param)
2241{
2242 void *cqc = param->cqc;
2243
2244 MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
2245}
2246
2247static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
2248 struct mlx5e_params *params,
2249 struct mlx5e_cq_param *param)
2250{
2251 struct mlx5_core_dev *mdev = priv->mdev;
2252 void *cqc = param->cqc;
2253 u8 log_cq_size;
2254
2255 switch (params->rq_wq_type) {
2256 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
2257 log_cq_size = mlx5e_mpwqe_get_log_rq_size(params) +
2258 mlx5e_mpwqe_get_log_num_strides(mdev, params);
2259 break;
2260 default:
2261 log_cq_size = params->log_rq_mtu_frames;
2262 }
2263
2264 MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
2265 if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
2266 MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
2267 MLX5_SET(cqc, cqc, cqe_comp_en, 1);
2268 }
2269
2270 mlx5e_build_common_cq_param(priv, param);
2271 param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
2272}
2273
2274static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
2275 struct mlx5e_params *params,
2276 struct mlx5e_cq_param *param)
2277{
2278 void *cqc = param->cqc;
2279
2280 MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);
2281
2282 mlx5e_build_common_cq_param(priv, param);
2283 param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
2284}
2285
2286static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
2287 u8 log_wq_size,
2288 struct mlx5e_cq_param *param)
2289{
2290 void *cqc = param->cqc;
2291
2292 MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);
2293
2294 mlx5e_build_common_cq_param(priv, param);
2295
2296 param->cq_period_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
2297}
2298
2299static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
2300 u8 log_wq_size,
2301 struct mlx5e_sq_param *param)
2302{
2303 void *sqc = param->sqc;
2304 void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
2305
2306 mlx5e_build_sq_param_common(priv, param);
2307
2308 MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
2309 MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
2310}
2311
2312static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
2313 struct mlx5e_params *params,
2314 struct mlx5e_sq_param *param)
2315{
2316 void *sqc = param->sqc;
2317 void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
2318
2319 mlx5e_build_sq_param_common(priv, param);
2320 MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
2321}
2322
2323static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
2324 struct mlx5e_params *params,
2325 struct mlx5e_channel_param *cparam)
2326{
2327 u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
2328
2329 mlx5e_build_rq_param(priv, params, &cparam->rq);
2330 mlx5e_build_sq_param(priv, params, &cparam->sq);
2331 mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
2332 mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
2333 mlx5e_build_rx_cq_param(priv, params, &cparam->rx_cq);
2334 mlx5e_build_tx_cq_param(priv, params, &cparam->tx_cq);
2335 mlx5e_build_ico_cq_param(priv, icosq_log_wq_sz, &cparam->icosq_cq);
2336}
2337
2338int mlx5e_open_channels(struct mlx5e_priv *priv,
2339 struct mlx5e_channels *chs)
2340{
2341 struct mlx5e_channel_param *cparam;
2342 int err = -ENOMEM;
2343 int i;
2344
2345 chs->num = chs->params.num_channels;
2346
2347 chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
2348 cparam = kvzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
2349 if (!chs->c || !cparam)
2350 goto err_free;
2351
2352 mlx5e_build_channel_param(priv, &chs->params, cparam);
2353 for (i = 0; i < chs->num; i++) {
2354 err = mlx5e_open_channel(priv, i, &chs->params, cparam, &chs->c[i]);
2355 if (err)
2356 goto err_close_channels;
2357 }
2358
2359 kvfree(cparam);
2360 return 0;
2361
2362err_close_channels:
2363 for (i--; i >= 0; i--)
2364 mlx5e_close_channel(chs->c[i]);
2365
2366err_free:
2367 kfree(chs->c);
2368 kvfree(cparam);
2369 chs->num = 0;
2370 return err;
2371}
2372
2373static void mlx5e_activate_channels(struct mlx5e_channels *chs)
2374{
2375 int i;
2376
2377 for (i = 0; i < chs->num; i++)
2378 mlx5e_activate_channel(chs->c[i]);
2379}
2380
2381static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
2382{
2383 int err = 0;
2384 int i;
2385
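 /* Full timeout for the first RQ only; once one RQ has timed out, the
 * remaining RQs are polled with a zero timeout.
 */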
2386 for (i = 0; i < chs->num; i++)
2387 err |= mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq,
2388 err ? 0 : 20000);
2389
2390 return err ? -ETIMEDOUT : 0;
2391}
2392
2393static void mlx5e_deactivate_channels(struct mlx5e_channels *chs)
2394{
2395 int i;
2396
2397 for (i = 0; i < chs->num; i++)
2398 mlx5e_deactivate_channel(chs->c[i]);
2399}
2400
2401void mlx5e_close_channels(struct mlx5e_channels *chs)
2402{
2403 int i;
2404
2405 for (i = 0; i < chs->num; i++)
2406 mlx5e_close_channel(chs->c[i]);
2407
2408 kfree(chs->c);
2409 chs->num = 0;
2410}
2411
2412static int
2413mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, struct mlx5e_rqt *rqt)
2414{
2415 struct mlx5_core_dev *mdev = priv->mdev;
2416 void *rqtc;
2417 int inlen;
2418 int err;
2419 u32 *in;
2420 int i;
2421
2422 inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
2423 in = kvzalloc(inlen, GFP_KERNEL);
2424 if (!in)
2425 return -ENOMEM;
2426
2427 rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
2428
2429 MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
2430 MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
2431
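 /* Fill the table with the drop RQ; entries are redirected to real RQs
 * later via mlx5e_redirect_rqt().
 */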
2432 for (i = 0; i < sz; i++)
2433 MLX5_SET(rqtc, rqtc, rq_num[i], priv->drop_rq.rqn);
2434
2435 err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
2436 if (!err)
2437 rqt->enabled = true;
2438
2439 kvfree(in);
2440 return err;
2441}
2442
2443void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
2444{
2445 rqt->enabled = false;
2446 mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
2447}
2448
2449int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv)
2450{
2451 struct mlx5e_rqt *rqt = &priv->indir_rqt;
2452 int err;
2453
2454 err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, rqt);
2455 if (err)
2456 mlx5_core_warn(priv->mdev, "create indirect rqts failed, %d\n", err);
2457 return err;
2458}
2459
2460int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
2461{
2462 struct mlx5e_rqt *rqt;
2463 int err;
2464 int ix;
2465
2466 for (ix = 0; ix < mlx5e_get_netdev_max_channels(priv->netdev); ix++) {
2467 rqt = &priv->direct_tir[ix].rqt;
2468 err = mlx5e_create_rqt(priv, 1, rqt);
2469 if (err)
2470 goto err_destroy_rqts;
2471 }
2472
2473 return 0;
2474
2475err_destroy_rqts:
2476 mlx5_core_warn(priv->mdev, "create direct rqts failed, %d\n", err);
2477 for (ix--; ix >= 0; ix--)
2478 mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt);
2479
2480 return err;
2481}
2482
2483void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv)
2484{
2485 int i;
2486
2487 for (i = 0; i < mlx5e_get_netdev_max_channels(priv->netdev); i++)
2488 mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
2489}
2490
2491static int mlx5e_rx_hash_fn(int hfunc)
2492{
2493 return (hfunc == ETH_RSS_HASH_TOP) ?
2494 MLX5_RX_HASH_FN_TOEPLITZ :
2495 MLX5_RX_HASH_FN_INVERTED_XOR8;
2496}
2497
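 /* Reverse the lowest 'size' bits of 'a'; used to build the RSS
 * indirection when the XOR hash function is selected.
 */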
2498int mlx5e_bits_invert(unsigned long a, int size)
2499{
2500 int inv = 0;
2501 int i;
2502
2503 for (i = 0; i < size; i++)
2504 inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
2505
2506 return inv;
2507}
2508
2509static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, int sz,
2510 struct mlx5e_redirect_rqt_param rrp, void *rqtc)
2511{
2512 int i;
2513
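 /* For RSS, map each entry through the indirection table (bit-inverted
 * indices for the XOR hash function); otherwise point every entry at
 * the single RQ given in rrp.rqn.
 */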
2514 for (i = 0; i < sz; i++) {
2515 u32 rqn;
2516
2517 if (rrp.is_rss) {
2518 int ix = i;
2519
2520 if (rrp.rss.hfunc == ETH_RSS_HASH_XOR)
2521 ix = mlx5e_bits_invert(i, ilog2(sz));
2522
2523 ix = priv->rss_params.indirection_rqt[ix];
2524 rqn = rrp.rss.channels->c[ix]->rq.rqn;
2525 } else {
2526 rqn = rrp.rqn;
2527 }
2528 MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
2529 }
2530}
2531
2532int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
2533 struct mlx5e_redirect_rqt_param rrp)
2534{
2535 struct mlx5_core_dev *mdev = priv->mdev;
2536 void *rqtc;
2537 int inlen;
2538 u32 *in;
2539 int err;
2540
2541 inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
2542 in = kvzalloc(inlen, GFP_KERNEL);
2543 if (!in)
2544 return -ENOMEM;
2545
2546 rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
2547
2548 MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
2549 MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
2550 mlx5e_fill_rqt_rqns(priv, sz, rrp, rqtc);
2551 err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);
2552
2553 kvfree(in);
2554 return err;
2555}
2556
2557static u32 mlx5e_get_direct_rqn(struct mlx5e_priv *priv, int ix,
2558 struct mlx5e_redirect_rqt_param rrp)
2559{
2560 if (!rrp.is_rss)
2561 return rrp.rqn;
2562
2563 if (ix >= rrp.rss.channels->num)
2564 return priv->drop_rq.rqn;
2565
2566 return rrp.rss.channels->c[ix]->rq.rqn;
2567}
2568
2569static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
2570 struct mlx5e_redirect_rqt_param rrp)
2571{
2572 u32 rqtn;
2573 int ix;
2574
2575 if (priv->indir_rqt.enabled) {
2576 /* RSS RQ table */
2577 rqtn = priv->indir_rqt.rqtn;
2578 mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
2579 }
2580
2581 for (ix = 0; ix < mlx5e_get_netdev_max_channels(priv->netdev); ix++) {
2582 struct mlx5e_redirect_rqt_param direct_rrp = {
2583 .is_rss = false,
2584 {
2585 .rqn = mlx5e_get_direct_rqn(priv, ix, rrp)
2586 },
2587 };
2588
2589 /* Direct RQ tables */
2590 if (!priv->direct_tir[ix].rqt.enabled)
2591 continue;
2592
2593 rqtn = priv->direct_tir[ix].rqt.rqtn;
2594 mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
2595 }
2596}
2597
2598static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv,
2599 struct mlx5e_channels *chs)
2600{
2601 struct mlx5e_redirect_rqt_param rrp = {
2602 .is_rss = true,
2603 {
2604 .rss = {
2605 .channels = chs,
2606 .hfunc = priv->rss_params.hfunc,
2607 }
2608 },
2609 };
2610
2611 mlx5e_redirect_rqts(priv, rrp);
2612}
2613
2614static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv)
2615{
2616 struct mlx5e_redirect_rqt_param drop_rrp = {
2617 .is_rss = false,
2618 {
2619 .rqn = priv->drop_rq.rqn,
2620 },
2621 };
2622
2623 mlx5e_redirect_rqts(priv, drop_rrp);
2624}
2625
2626static const struct mlx5e_tirc_config tirc_default_config[MLX5E_NUM_INDIR_TIRS] = {
2627 [MLX5E_TT_IPV4_TCP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
2628 .l4_prot_type = MLX5_L4_PROT_TYPE_TCP,
2629 .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
2630 },
2631 [MLX5E_TT_IPV6_TCP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
2632 .l4_prot_type = MLX5_L4_PROT_TYPE_TCP,
2633 .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
2634 },
2635 [MLX5E_TT_IPV4_UDP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
2636 .l4_prot_type = MLX5_L4_PROT_TYPE_UDP,
2637 .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
2638 },
2639 [MLX5E_TT_IPV6_UDP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
2640 .l4_prot_type = MLX5_L4_PROT_TYPE_UDP,
2641 .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
2642 },
2643 [MLX5E_TT_IPV4_IPSEC_AH] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
2644 .l4_prot_type = 0,
2645 .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
2646 },
2647 [MLX5E_TT_IPV6_IPSEC_AH] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
2648 .l4_prot_type = 0,
2649 .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
2650 },
2651 [MLX5E_TT_IPV4_IPSEC_ESP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
2652 .l4_prot_type = 0,
2653 .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
2654 },
2655 [MLX5E_TT_IPV6_IPSEC_ESP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
2656 .l4_prot_type = 0,
2657 .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
2658 },
2659 [MLX5E_TT_IPV4] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
2660 .l4_prot_type = 0,
2661 .rx_hash_fields = MLX5_HASH_IP,
2662 },
2663 [MLX5E_TT_IPV6] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
2664 .l4_prot_type = 0,
2665 .rx_hash_fields = MLX5_HASH_IP,
2666 },
2667};
2668
2669struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt)
2670{
2671 return tirc_default_config[tt];
2672}
2673
2674static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc)
2675{
2676 if (!params->lro_en)
2677 return;
2678
2679#define ROUGH_MAX_L2_L3_HDR_SZ 256
2680
2681 MLX5_SET(tirc, tirc, lro_enable_mask,
2682 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
2683 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
2684 MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
2685 (params->lro_wqe_sz - ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
2686 MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout);
2687}
2688
2689void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params,
2690 const struct mlx5e_tirc_config *ttconfig,
2691 void *tirc, bool inner)
2692{
2693 void *hfso = inner ? MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner) :
2694 MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
2695
2696 MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(rss_params->hfunc));
2697 if (rss_params->hfunc == ETH_RSS_HASH_TOP) {
2698 void *rss_key = MLX5_ADDR_OF(tirc, tirc,
2699 rx_hash_toeplitz_key);
2700 size_t len = MLX5_FLD_SZ_BYTES(tirc,
2701 rx_hash_toeplitz_key);
2702
2703 MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
2704 memcpy(rss_key, rss_params->toeplitz_hash_key, len);
2705 }
2706 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2707 ttconfig->l3_prot_type);
2708 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2709 ttconfig->l4_prot_type);
2710 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2711 ttconfig->rx_hash_fields);
2712}
2713
2714void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
2715{
2716 void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
2717 struct mlx5_core_dev *mdev = priv->mdev;
2718 int ctxlen = MLX5_ST_SZ_BYTES(tirc);
2719 int tt;
2720
2721 MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
2722
2723 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
2724 memset(tirc, 0, ctxlen);
2725 mlx5e_build_indir_tir_ctx_hash(&priv->rss_params,
2726 &tirc_default_config[tt],
2727 tirc, false);
2728 mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
2729 }
2730
2731 if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
2732 return;
2733
2734 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
2735 memset(tirc, 0, ctxlen);
2736 mlx5e_build_indir_tir_ctx_hash(&priv->rss_params,
2737 &tirc_default_config[tt],
2738 tirc, true);
2739 mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in,
2740 inlen);
2741 }
2742}
2743
2744static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
2745{
2746 struct mlx5_core_dev *mdev = priv->mdev;
2747
2748 void *in;
2749 void *tirc;
2750 int inlen;
2751 int err;
2752 int tt;
2753 int ix;
2754
2755 inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
2756 in = kvzalloc(inlen, GFP_KERNEL);
2757 if (!in)
2758 return -ENOMEM;
2759
2760 MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
2761 tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
2762
2763 mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
2764
2765 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
2766 err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
2767 inlen);
2768 if (err)
2769 goto free_in;
2770 }
2771
2772 for (ix = 0; ix < mlx5e_get_netdev_max_channels(priv->netdev); ix++) {
2773 err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
2774 in, inlen);
2775 if (err)
2776 goto free_in;
2777 }
2778
2779free_in:
2780 kvfree(in);
2781
2782 return err;
2783}
2784
2785static int mlx5e_set_mtu(struct mlx5_core_dev *mdev,
2786 struct mlx5e_params *params, u16 mtu)
2787{
2788 u16 hw_mtu = MLX5E_SW2HW_MTU(params, mtu);
2789 int err;
2790
2791 err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
2792 if (err)
2793 return err;
2794
2795 /* Update vport context MTU */
2796 mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
2797 return 0;
2798}
2799
2800static void mlx5e_query_mtu(struct mlx5_core_dev *mdev,
2801 struct mlx5e_params *params, u16 *mtu)
2802{
2803 u16 hw_mtu = 0;
2804 int err;
2805
2806 err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
2807 if (err || !hw_mtu)
2808 mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
2809
2810 *mtu = MLX5E_HW2SW_MTU(params, hw_mtu);
2811}
2812
2813static int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
2814{
2815 struct mlx5e_params *params = &priv->channels.params;
2816 struct net_device *netdev = priv->netdev;
2817 struct mlx5_core_dev *mdev = priv->mdev;
2818 u16 mtu;
2819 int err;
2820
2821 err = mlx5e_set_mtu(mdev, params, params->sw_mtu);
2822 if (err)
2823 return err;
2824
2825 mlx5e_query_mtu(mdev, params, &mtu);
2826 if (mtu != params->sw_mtu)
2827 netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
2828 __func__, mtu, params->sw_mtu);
2829
2830 params->sw_mtu = mtu;
2831 return 0;
2832}
2833
2834static void mlx5e_netdev_set_tcs(struct net_device *netdev)
2835{
2836 struct mlx5e_priv *priv = netdev_priv(netdev);
2837 int nch = priv->channels.params.num_channels;
2838 int ntc = priv->channels.params.num_tc;
2839 int tc;
2840
2841 netdev_reset_tc(netdev);
2842
2843 if (ntc == 1)
2844 return;
2845
2846 netdev_set_num_tc(netdev, ntc);
2847
2848 /* Map every netdev TC to queue offset 0; the driver keeps its own
2849 * TC-to-TXQ mapping (see mlx5e_build_tc2txq_maps()).
2850 */
2851 for (tc = 0; tc < ntc; tc++)
2852 netdev_set_tc_queue(netdev, tc, nch, 0);
2853}
2854
2855static void mlx5e_build_tc2txq_maps(struct mlx5e_priv *priv)
2856{
2857 int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
2858 int i, tc;
2859
2860 for (i = 0; i < max_nch; i++)
2861 for (tc = 0; tc < priv->profile->max_tc; tc++)
2862 priv->channel_tc2txq[i][tc] = i + tc * max_nch;
2863}
2864
2865static void mlx5e_build_tx2sq_maps(struct mlx5e_priv *priv)
2866{
2867 struct mlx5e_channel *c;
2868 struct mlx5e_txqsq *sq;
2869 int i, tc;
2870
2871 for (i = 0; i < priv->channels.num; i++) {
2872 c = priv->channels.c[i];
2873 for (tc = 0; tc < c->num_tc; tc++) {
2874 sq = &c->sq[tc];
2875 priv->txq2sq[sq->txq_ix] = sq;
2876 }
2877 }
2878}
2879
2880void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
2881{
2882 int num_txqs = priv->channels.num * priv->channels.params.num_tc;
2883 struct net_device *netdev = priv->netdev;
2884
2885 mlx5e_netdev_set_tcs(netdev);
2886 netif_set_real_num_tx_queues(netdev, num_txqs);
2887 netif_set_real_num_rx_queues(netdev, priv->channels.num);
2888
2889 mlx5e_build_tx2sq_maps(priv);
2890 mlx5e_activate_channels(&priv->channels);
2891 mlx5e_xdp_tx_enable(priv);
2892 netif_tx_start_all_queues(priv->netdev);
2893
2894 if (MLX5_ESWITCH_MANAGER(priv->mdev))
2895 mlx5e_add_sqs_fwd_rules(priv);
2896
2897 mlx5e_wait_channels_min_rx_wqes(&priv->channels);
2898 mlx5e_redirect_rqts_to_channels(priv, &priv->channels);
2899}
2900
2901void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
2902{
2903 mlx5e_redirect_rqts_to_drop(priv);
2904
2905 if (MLX5_ESWITCH_MANAGER(priv->mdev))
2906 mlx5e_remove_sqs_fwd_rules(priv);
2907
2908 /* Stop the TX queues before deactivating the channels so the stack
2909 * stops submitting packets on queues that are going away.
2910 */
2911 netif_tx_stop_all_queues(priv->netdev);
2912 netif_tx_disable(priv->netdev);
2913 mlx5e_xdp_tx_disable(priv);
2914 mlx5e_deactivate_channels(&priv->channels);
2915}
2916
2917void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
2918 struct mlx5e_channels *new_chs,
2919 mlx5e_fp_hw_modify hw_modify)
2920{
2921 struct net_device *netdev = priv->netdev;
2922 int new_num_txqs;
2923 int carrier_ok;
2924
2925 new_num_txqs = new_chs->num * new_chs->params.num_tc;
2926 carrier_ok = netif_carrier_ok(netdev);
2927 netif_carrier_off(netdev);
2928
2929 if (new_num_txqs < netdev->real_num_tx_queues)
2930 netif_set_real_num_tx_queues(netdev, new_num_txqs);
2931
2932 mlx5e_deactivate_priv_channels(priv);
2933 mlx5e_close_channels(&priv->channels);
2934
2935 priv->channels = *new_chs;
2936
2937 /* New channels are ready to roll; apply HW modifications if requested */
2938 if (hw_modify)
2939 hw_modify(priv);
2940
2941 mlx5e_refresh_tirs(priv, false);
2942 mlx5e_activate_priv_channels(priv);
2943
2944 /* Restore the carrier state if the link was up before the switch */
2945 if (carrier_ok)
2946 netif_carrier_on(netdev);
2947}
2948
2949void mlx5e_timestamp_init(struct mlx5e_priv *priv)
2950{
2951 priv->tstamp.tx_type = HWTSTAMP_TX_OFF;
2952 priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
2953}
2954
2955int mlx5e_open_locked(struct net_device *netdev)
2956{
2957 struct mlx5e_priv *priv = netdev_priv(netdev);
2958 int err;
2959
2960 set_bit(MLX5E_STATE_OPENED, &priv->state);
2961
2962 err = mlx5e_open_channels(priv, &priv->channels);
2963 if (err)
2964 goto err_clear_state_opened_flag;
2965
2966 mlx5e_refresh_tirs(priv, false);
2967 mlx5e_activate_priv_channels(priv);
2968 if (priv->profile->update_carrier)
2969 priv->profile->update_carrier(priv);
2970
2971 if (priv->profile->update_stats)
2972 queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
2973
2974 return 0;
2975
2976err_clear_state_opened_flag:
2977 clear_bit(MLX5E_STATE_OPENED, &priv->state);
2978 return err;
2979}
2980
2981int mlx5e_open(struct net_device *netdev)
2982{
2983 struct mlx5e_priv *priv = netdev_priv(netdev);
2984 int err;
2985
2986 mutex_lock(&priv->state_lock);
2987 err = mlx5e_open_locked(netdev);
2988 if (!err)
2989 mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
2990 mutex_unlock(&priv->state_lock);
2991
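 /* Ask the stack to replay its known UDP tunnel ports so that VXLAN
 * offload state is rebuilt after the open.
 */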
2992 if (mlx5_vxlan_allowed(priv->mdev->vxlan))
2993 udp_tunnel_get_rx_info(netdev);
2994
2995 return err;
2996}
2997
2998int mlx5e_close_locked(struct net_device *netdev)
2999{
3000 struct mlx5e_priv *priv = netdev_priv(netdev);
3001
3002 /* May already be CLOSED in case a previous configuration operation
3003 * (e.g. an RX/TX queue size change) that involved a close/open cycle failed.
3004 */
3005 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
3006 return 0;
3007
3008 clear_bit(MLX5E_STATE_OPENED, &priv->state);
3009
3010 netif_carrier_off(priv->netdev);
3011 mlx5e_deactivate_priv_channels(priv);
3012 mlx5e_close_channels(&priv->channels);
3013
3014 return 0;
3015}
3016
3017int mlx5e_close(struct net_device *netdev)
3018{
3019 struct mlx5e_priv *priv = netdev_priv(netdev);
3020 int err;
3021
3022 if (!netif_device_present(netdev))
3023 return -ENODEV;
3024
3025 mutex_lock(&priv->state_lock);
3026 mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN);
3027 err = mlx5e_close_locked(netdev);
3028 mutex_unlock(&priv->state_lock);
3029
3030 return err;
3031}
3032
3033static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
3034 struct mlx5e_rq *rq,
3035 struct mlx5e_rq_param *param)
3036{
3037 void *rqc = param->rqc;
3038 void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
3039 int err;
3040
3041 param->wq.db_numa_node = param->wq.buf_numa_node;
3042
3043 err = mlx5_wq_cyc_create(mdev, &param->wq, rqc_wq, &rq->wqe.wq,
3044 &rq->wq_ctrl);
3045 if (err)
3046 return err;
3047
3048 /* Mark as unused; drop-RQ packets never reach XDP */
3049 xdp_rxq_info_unused(&rq->xdp_rxq);
3050
3051 rq->mdev = mdev;
3052
3053 return 0;
3054}
3055
3056static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
3057 struct mlx5e_cq *cq,
3058 struct mlx5e_cq_param *param)
3059{
3060 param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
3061 param->wq.db_numa_node = dev_to_node(&mdev->pdev->dev);
3062
3063 return mlx5e_alloc_cq_common(mdev, param, cq);
3064}
3065
3066int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
3067 struct mlx5e_rq *drop_rq)
3068{
3069 struct mlx5_core_dev *mdev = priv->mdev;
3070 struct mlx5e_cq_param cq_param = {};
3071 struct mlx5e_rq_param rq_param = {};
3072 struct mlx5e_cq *cq = &drop_rq->cq;
3073 int err;
3074
3075 mlx5e_build_drop_rq_param(priv, &rq_param);
3076
3077 err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
3078 if (err)
3079 return err;
3080
3081 err = mlx5e_create_cq(cq, &cq_param);
3082 if (err)
3083 goto err_free_cq;
3084
3085 err = mlx5e_alloc_drop_rq(mdev, drop_rq, &rq_param);
3086 if (err)
3087 goto err_destroy_cq;
3088
3089 err = mlx5e_create_rq(drop_rq, &rq_param);
3090 if (err)
3091 goto err_free_rq;
3092
3093 err = mlx5e_modify_rq_state(drop_rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
3094 if (err)
3095 mlx5_core_warn(priv->mdev, "modify_rq_state failed, rx_if_down_packets won't be counted %d\n", err);
3096
3097 return 0;
3098
3099err_free_rq:
3100 mlx5e_free_rq(drop_rq);
3101
3102err_destroy_cq:
3103 mlx5e_destroy_cq(cq);
3104
3105err_free_cq:
3106 mlx5e_free_cq(cq);
3107
3108 return err;
3109}
3110
3111void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
3112{
3113 mlx5e_destroy_rq(drop_rq);
3114 mlx5e_free_rq(drop_rq);
3115 mlx5e_destroy_cq(&drop_rq->cq);
3116 mlx5e_free_cq(&drop_rq->cq);
3117}
3118
3119int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn)
3120{
3121 void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
3122
3123 MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);
3124
3125 if (mlx5_lag_is_lacp_owner(mdev))
3126 MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
3127
3128 return mlx5_core_create_tis(mdev, in, MLX5_ST_SZ_BYTES(create_tis_in), tisn);
3129}
3130
3131void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
3132{
3133 mlx5_core_destroy_tis(mdev, tisn);
3134}
3135
3136void mlx5e_destroy_tises(struct mlx5e_priv *priv)
3137{
3138 int tc, i;
3139
3140 for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++)
3141 for (tc = 0; tc < priv->profile->max_tc; tc++)
3142 mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]);
3143}
3144
3145static bool mlx5e_lag_should_assign_affinity(struct mlx5_core_dev *mdev)
3146{
3147 return MLX5_CAP_GEN(mdev, lag_tx_port_affinity) && mlx5e_get_num_lag_ports(mdev) > 1;
3148}
3149
3150int mlx5e_create_tises(struct mlx5e_priv *priv)
3151{
3152 int tc, i;
3153 int err;
3154
3155 for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++) {
3156 for (tc = 0; tc < priv->profile->max_tc; tc++) {
3157 u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
3158 void *tisc;
3159
3160 tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
3161
3162 MLX5_SET(tisc, tisc, prio, tc << 1);
3163
3164 if (mlx5e_lag_should_assign_affinity(priv->mdev))
3165 MLX5_SET(tisc, tisc, lag_tx_port_affinity, i + 1);
3166
3167 err = mlx5e_create_tis(priv->mdev, in, &priv->tisn[i][tc]);
3168 if (err)
3169 goto err_close_tises;
3170 }
3171 }
3172
3173 return 0;
3174
3175err_close_tises:
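 /* Unwind: destroy the TISes of the partially initialized port first,
 * then all TCs of the previously completed ports.
 */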
3176 for (; i >= 0; i--) {
3177 for (tc--; tc >= 0; tc--)
3178 mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]);
3179 tc = priv->profile->max_tc;
3180 }
3181
3182 return err;
3183}
3184
3185void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
3186{
3187 mlx5e_destroy_tises(priv);
3188}
3189
3190static void mlx5e_build_indir_tir_ctx_common(struct mlx5e_priv *priv,
3191 u32 rqtn, u32 *tirc)
3192{
3193 MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
3194 MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
3195 MLX5_SET(tirc, tirc, indirect_table, rqtn);
3196 MLX5_SET(tirc, tirc, tunneled_offload_en,
3197 priv->channels.params.tunneled_offload_en);
3198
3199 mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
3200}
3201
3202static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv,
3203 enum mlx5e_traffic_types tt,
3204 u32 *tirc)
3205{
3206 mlx5e_build_indir_tir_ctx_common(priv, priv->indir_rqt.rqtn, tirc);
3207 mlx5e_build_indir_tir_ctx_hash(&priv->rss_params,
3208 &tirc_default_config[tt], tirc, false);
3209}
3210
3211static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc)
3212{
3213 mlx5e_build_indir_tir_ctx_common(priv, rqtn, tirc);
3214 MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
3215}
3216
3217static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv,
3218 enum mlx5e_traffic_types tt,
3219 u32 *tirc)
3220{
3221 mlx5e_build_indir_tir_ctx_common(priv, priv->indir_rqt.rqtn, tirc);
3222 mlx5e_build_indir_tir_ctx_hash(&priv->rss_params,
3223 &tirc_default_config[tt], tirc, true);
3224}
3225
3226int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
3227{
3228 struct mlx5e_tir *tir;
3229 void *tirc;
3230 int inlen;
3231 int i = 0;
3232 int err;
3233 u32 *in;
3234 int tt;
3235
3236 inlen = MLX5_ST_SZ_BYTES(create_tir_in);
3237 in = kvzalloc(inlen, GFP_KERNEL);
3238 if (!in)
3239 return -ENOMEM;
3240
3241 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
3242 memset(in, 0, inlen);
3243 tir = &priv->indir_tir[tt];
3244 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
3245 mlx5e_build_indir_tir_ctx(priv, tt, tirc);
3246 err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
3247 if (err) {
3248 mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err);
3249 goto err_destroy_inner_tirs;
3250 }
3251 }
3252
3253 if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
3254 goto out;
3255
3256 for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) {
3257 memset(in, 0, inlen);
3258 tir = &priv->inner_indir_tir[i];
3259 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
3260 mlx5e_build_inner_indir_tir_ctx(priv, i, tirc);
3261 err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
3262 if (err) {
3263 mlx5_core_warn(priv->mdev, "create inner indirect tirs failed, %d\n", err);
3264 goto err_destroy_inner_tirs;
3265 }
3266 }
3267
3268out:
3269 kvfree(in);
3270
3271 return 0;
3272
3273err_destroy_inner_tirs:
3274 for (i--; i >= 0; i--)
3275 mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
3276
3277 for (tt--; tt >= 0; tt--)
3278 mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);
3279
3280 kvfree(in);
3281
3282 return err;
3283}
3284
3285int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
3286{
3287 int nch = mlx5e_get_netdev_max_channels(priv->netdev);
3288 struct mlx5e_tir *tir;
3289 void *tirc;
3290 int inlen;
3291 int err;
3292 u32 *in;
3293 int ix;
3294
3295 inlen = MLX5_ST_SZ_BYTES(create_tir_in);
3296 in = kvzalloc(inlen, GFP_KERNEL);
3297 if (!in)
3298 return -ENOMEM;
3299
3300 for (ix = 0; ix < nch; ix++) {
3301 memset(in, 0, inlen);
3302 tir = &priv->direct_tir[ix];
3303 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
3304 mlx5e_build_direct_tir_ctx(priv, priv->direct_tir[ix].rqt.rqtn, tirc);
3305 err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
3306 if (err)
3307 goto err_destroy_ch_tirs;
3308 }
3309
3310 kvfree(in);
3311
3312 return 0;
3313
3314err_destroy_ch_tirs:
3315 mlx5_core_warn(priv->mdev, "create direct tirs failed, %d\n", err);
3316 for (ix--; ix >= 0; ix--)
3317 mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]);
3318
3319 kvfree(in);
3320
3321 return err;
3322}
3323
3324void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
3325{
3326 int i;
3327
3328 for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
3329 mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
3330
3331 if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
3332 return;
3333
3334 for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
3335 mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
3336}
3337
3338void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
3339{
3340 int nch = mlx5e_get_netdev_max_channels(priv->netdev);
3341 int i;
3342
3343 for (i = 0; i < nch; i++)
3344 mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]);
3345}
3346
3347static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
3348{
3349 int err = 0;
3350 int i;
3351
3352 for (i = 0; i < chs->num; i++) {
3353 err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable);
3354 if (err)
3355 return err;
3356 }
3357
3358 return 0;
3359}
3360
3361static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
3362{
3363 int err = 0;
3364 int i;
3365
3366 for (i = 0; i < chs->num; i++) {
3367 err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd);
3368 if (err)
3369 return err;
3370 }
3371
3372 return 0;
3373}
3374
3375static int mlx5e_setup_tc_mqprio(struct net_device *netdev,
3376 struct tc_mqprio_qopt *mqprio)
3377{
3378 struct mlx5e_priv *priv = netdev_priv(netdev);
3379 struct mlx5e_channels new_channels = {};
3380 u8 tc = mqprio->num_tc;
3381 int err = 0;
3382
3383 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
3384
3385 if (tc && tc != MLX5E_MAX_NUM_TC)
3386 return -EINVAL;
3387
3388 mutex_lock(&priv->state_lock);
3389
3390 new_channels.params = priv->channels.params;
3391 new_channels.params.num_tc = tc ? tc : 1;
3392
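 /* Channels are closed: only the stored parameters need updating */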
3393 if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
3394 priv->channels.params = new_channels.params;
3395 goto out;
3396 }
3397
3398 err = mlx5e_open_channels(priv, &new_channels);
3399 if (err)
3400 goto out;
3401
3402 priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
3403 new_channels.params.num_tc);
3404 mlx5e_switch_priv_channels(priv, &new_channels, NULL);
3405out:
3406 mutex_unlock(&priv->state_lock);
3407
3408 return err;
3409}
3410
3411#ifdef CONFIG_MLX5_ESWITCH
3412static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
3413 struct tc_cls_flower_offload *cls_flower,
3414 int flags)
3415{
3416 switch (cls_flower->command) {
3417 case TC_CLSFLOWER_REPLACE:
3418 return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
3419 flags);
3420 case TC_CLSFLOWER_DESTROY:
3421 return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
3422 flags);
3423 case TC_CLSFLOWER_STATS:
3424 return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
3425 flags);
3426 default:
3427 return -EOPNOTSUPP;
3428 }
3429}
3430
3431static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3432 void *cb_priv)
3433{
3434 struct mlx5e_priv *priv = cb_priv;
3435
3436 switch (type) {
3437 case TC_SETUP_CLSFLOWER:
3438 return mlx5e_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS);
3439 default:
3440 return -EOPNOTSUPP;
3441 }
3442}
3443
3444static int mlx5e_setup_tc_block(struct net_device *dev,
3445 struct tc_block_offload *f)
3446{
3447 struct mlx5e_priv *priv = netdev_priv(dev);
3448
3449 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
3450 return -EOPNOTSUPP;
3451
3452 switch (f->command) {
3453 case TC_BLOCK_BIND:
3454 return tcf_block_cb_register(f->block, mlx5e_setup_tc_block_cb,
3455 priv, priv);
3456 case TC_BLOCK_UNBIND:
3457 tcf_block_cb_unregister(f->block, mlx5e_setup_tc_block_cb,
3458 priv);
3459 return 0;
3460 default:
3461 return -EOPNOTSUPP;
3462 }
3463}
3464#endif
3465
3466static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
3467 void *type_data)
3468{
3469 switch (type) {
3470#ifdef CONFIG_MLX5_ESWITCH
3471 case TC_SETUP_BLOCK:
3472 return mlx5e_setup_tc_block(dev, type_data);
3473#endif
3474 case TC_SETUP_QDISC_MQPRIO:
3475 return mlx5e_setup_tc_mqprio(dev, type_data);
3476 default:
3477 return -EOPNOTSUPP;
3478 }
3479}
3480
3481void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
3482{
3483 int i;
3484
3485 for (i = 0; i < mlx5e_get_netdev_max_channels(priv->netdev); i++) {
3486 struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i];
3487 struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
3488 int j;
3489
3490 s->rx_packets += rq_stats->packets;
3491 s->rx_bytes += rq_stats->bytes;
3492
3493 for (j = 0; j < priv->max_opened_tc; j++) {
3494 struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
3495
3496 s->tx_packets += sq_stats->packets;
3497 s->tx_bytes += sq_stats->bytes;
3498 s->tx_dropped += sq_stats->dropped;
3499 }
3500 }
3501}
3502
3503static void
3504mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
3505{
3506 struct mlx5e_priv *priv = netdev_priv(dev);
3507 struct mlx5e_vport_stats *vstats = &priv->stats.vport;
3508 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
3509
3510 if (mlx5e_is_uplink_rep(priv)) {
3511 stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
3512 stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok);
3513 stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
3514 stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
3515 } else {
3516 mlx5e_fold_sw_stats64(priv, stats);
3517 }
3518
3519 stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
3520
3521 stats->rx_length_errors =
3522 PPORT_802_3_GET(pstats, a_in_range_length_errors) +
3523 PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
3524 PPORT_802_3_GET(pstats, a_frame_too_long_errors);
3525 stats->rx_crc_errors =
3526 PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
3527 stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
3528 stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
3529 stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
3530 stats->rx_frame_errors;
3531 stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;
3532
3533 /* The vport multicast counter also includes packets that were dropped
3534 * due to steering or an RX out-of-buffer condition.
3535 */
3536 stats->multicast =
3537 VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
3538}
3539
3540static void mlx5e_set_rx_mode(struct net_device *dev)
3541{
3542 struct mlx5e_priv *priv = netdev_priv(dev);
3543
3544 queue_work(priv->wq, &priv->set_rx_mode_work);
3545}
3546
3547static int mlx5e_set_mac(struct net_device *netdev, void *addr)
3548{
3549 struct mlx5e_priv *priv = netdev_priv(netdev);
3550 struct sockaddr *saddr = addr;
3551
3552 if (!is_valid_ether_addr(saddr->sa_data))
3553 return -EADDRNOTAVAIL;
3554
3555 netif_addr_lock_bh(netdev);
3556 ether_addr_copy(netdev->dev_addr, saddr->sa_data);
3557 netif_addr_unlock_bh(netdev);
3558
3559 queue_work(priv->wq, &priv->set_rx_mode_work);
3560
3561 return 0;
3562}
3563
3564#define MLX5E_SET_FEATURE(features, feature, enable) \
3565 do { \
3566 if (enable) \
3567 *features |= feature; \
3568 else \
3569 *features &= ~feature; \
3570 } while (0)
3571
3572typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
3573
3574static int set_feature_lro(struct net_device *netdev, bool enable)
3575{
3576 struct mlx5e_priv *priv = netdev_priv(netdev);
3577 struct mlx5_core_dev *mdev = priv->mdev;
3578 struct mlx5e_channels new_channels = {};
3579 struct mlx5e_params *old_params;
3580 int err = 0;
3581 bool reset;
3582
3583 mutex_lock(&priv->state_lock);
3584
3585 old_params = &priv->channels.params;
3586 if (enable && !MLX5E_GET_PFLAG(old_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
3587 netdev_warn(netdev, "can't set LRO with legacy RQ\n");
3588 err = -EINVAL;
3589 goto out;
3590 }
3591
3592 reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
3593
3594 new_channels.params = *old_params;
3595 new_channels.params.lro_en = enable;
3596
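 /* With striding RQ, toggling LRO requires reopening the channels only
 * when it changes whether received SKBs can be built linearly.
 */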
3597 if (old_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
3598 if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params) ==
3599 mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params))
3600 reset = false;
3601 }
3602
3603 if (!reset) {
3604 *old_params = new_channels.params;
3605 err = mlx5e_modify_tirs_lro(priv);
3606 goto out;
3607 }
3608
3609 err = mlx5e_open_channels(priv, &new_channels);
3610 if (err)
3611 goto out;
3612
3613 mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_modify_tirs_lro);
3614out:
3615 mutex_unlock(&priv->state_lock);
3616 return err;
3617}
3618
3619static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
3620{
3621 struct mlx5e_priv *priv = netdev_priv(netdev);
3622
3623 if (enable)
3624 mlx5e_enable_cvlan_filter(priv);
3625 else
3626 mlx5e_disable_cvlan_filter(priv);
3627
3628 return 0;
3629}
3630
3631#ifdef CONFIG_MLX5_ESWITCH
3632static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
3633{
3634 struct mlx5e_priv *priv = netdev_priv(netdev);
3635
3636 if (!enable && mlx5e_tc_num_filters(priv)) {
3637 netdev_err(netdev,
3638 "Active offloaded tc filters, can't turn hw_tc_offload off\n");
3639 return -EINVAL;
3640 }
3641
3642 return 0;
3643}
3644#endif
3645
3646static int set_feature_rx_all(struct net_device *netdev, bool enable)
3647{
3648 struct mlx5e_priv *priv = netdev_priv(netdev);
3649 struct mlx5_core_dev *mdev = priv->mdev;
3650
3651 return mlx5_set_port_fcs(mdev, !enable);
3652}
3653
3654static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
3655{
3656 struct mlx5e_priv *priv = netdev_priv(netdev);
3657 int err;
3658
3659 mutex_lock(&priv->state_lock);
3660
3661 priv->channels.params.scatter_fcs_en = enable;
3662 err = mlx5e_modify_channels_scatter_fcs(&priv->channels, enable);
3663 if (err)
3664 priv->channels.params.scatter_fcs_en = !enable;
3665
3666 mutex_unlock(&priv->state_lock);
3667
3668 return err;
3669}
3670
3671static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
3672{
3673 struct mlx5e_priv *priv = netdev_priv(netdev);
3674 int err = 0;
3675
3676 mutex_lock(&priv->state_lock);
3677
3678 priv->channels.params.vlan_strip_disable = !enable;
3679 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
3680 goto unlock;
3681
3682 err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
3683 if (err)
3684 priv->channels.params.vlan_strip_disable = enable;
3685
3686unlock:
3687 mutex_unlock(&priv->state_lock);
3688
3689 return err;
3690}
3691
3692#ifdef CONFIG_MLX5_EN_ARFS
3693static int set_feature_arfs(struct net_device *netdev, bool enable)
3694{
3695 struct mlx5e_priv *priv = netdev_priv(netdev);
3696 int err;
3697
3698 if (enable)
3699 err = mlx5e_arfs_enable(priv);
3700 else
3701 err = mlx5e_arfs_disable(priv);
3702
3703 return err;
3704}
3705#endif
3706
3707static int mlx5e_handle_feature(struct net_device *netdev,
3708 netdev_features_t *features,
3709 netdev_features_t wanted_features,
3710 netdev_features_t feature,
3711 mlx5e_feature_handler feature_handler)
3712{
3713 netdev_features_t changes = wanted_features ^ netdev->features;
3714 bool enable = !!(wanted_features & feature);
3715 int err;
3716
3717 if (!(changes & feature))
3718 return 0;
3719
3720 err = feature_handler(netdev, enable);
3721 if (err) {
3722 netdev_err(netdev, "%s feature %pNF failed, err %d\n",
3723 enable ? "Enable" : "Disable", &feature, err);
3724 return err;
3725 }
3726
3727 MLX5E_SET_FEATURE(features, feature, enable);
3728 return 0;
3729}
3730
3731static int mlx5e_set_features(struct net_device *netdev,
3732 netdev_features_t features)
3733{
3734 netdev_features_t oper_features = netdev->features;
3735 int err = 0;
3736
3737#define MLX5E_HANDLE_FEATURE(feature, handler) \
3738 mlx5e_handle_feature(netdev, &oper_features, features, feature, handler)
3739
3740 err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
3741 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
3742 set_feature_cvlan_filter);
3743#ifdef CONFIG_MLX5_ESWITCH
3744 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters);
3745#endif
3746 err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
3747 err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
3748 err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
3749#ifdef CONFIG_MLX5_EN_ARFS
3750 err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs);
3751#endif
3752
3753 if (err) {
3754 netdev->features = oper_features;
3755 return -EINVAL;
3756 }
3757
3758 return 0;
3759}
3760
3761static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
3762 netdev_features_t features)
3763{
3764 struct mlx5e_priv *priv = netdev_priv(netdev);
3765 struct mlx5e_params *params;
3766
3767 mutex_lock(&priv->state_lock);
3768 params = &priv->channels.params;
3769 if (!bitmap_empty(priv->fs.vlan.active_svlans, VLAN_N_VID)) {
3770 /* HW strips the outer C-tag header, which is a problem
3771 * for S-tag traffic.
3772 */
3773 features &= ~NETIF_F_HW_VLAN_CTAG_RX;
3774 if (!params->vlan_strip_disable)
3775 netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n");
3776 }
3777 if (!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
3778 if (features & NETIF_F_LRO) {
3779 netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n");
3780 features &= ~NETIF_F_LRO;
3781 }
3782 }
3783
3784 if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
3785 features &= ~NETIF_F_RXHASH;
3786 if (netdev->features & NETIF_F_RXHASH)
3787 netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n");
3788 }
3789
3790 mutex_unlock(&priv->state_lock);
3791
3792 return features;
3793}
3794
3795int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
3796 change_hw_mtu_cb set_mtu_cb)
3797{
3798 struct mlx5e_priv *priv = netdev_priv(netdev);
3799 struct mlx5e_channels new_channels = {};
3800 struct mlx5e_params *params;
3801 int err = 0;
3802 bool reset;
3803
3804 mutex_lock(&priv->state_lock);
3805
3806 params = &priv->channels.params;
3807
3808 reset = !params->lro_en;
3809 reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);
3810
3811 new_channels.params = *params;
3812 new_channels.params.sw_mtu = new_mtu;
3813
3814 if (params->xdp_prog &&
3815 !mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
3816 netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n",
3817 new_mtu, MLX5E_XDP_MAX_MTU);
3818 err = -EINVAL;
3819 goto out;
3820 }
3821
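 /* With striding RQ, the restart can be skipped only when the RQ is
 * non-linear and the packets-per-WQE count is unchanged.
 */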
3822 if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
3823 bool is_linear = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, &new_channels.params);
3824 u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params);
3825 u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params);
3826
3827 reset = reset && (is_linear || (ppw_old != ppw_new));
3828 }
3829
3830 if (!reset) {
3831 params->sw_mtu = new_mtu;
3832 if (set_mtu_cb)
3833 set_mtu_cb(priv);
3834 netdev->mtu = params->sw_mtu;
3835 goto out;
3836 }
3837
3838 err = mlx5e_open_channels(priv, &new_channels);
3839 if (err)
3840 goto out;
3841
3842 mlx5e_switch_priv_channels(priv, &new_channels, set_mtu_cb);
3843 netdev->mtu = new_channels.params.sw_mtu;
3844
3845out:
3846 mutex_unlock(&priv->state_lock);
3847 return err;
3848}
3849
3850static int mlx5e_change_nic_mtu(struct net_device *netdev, int new_mtu)
3851{
3852 return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu);
3853}
3854
3855int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
3856{
3857 struct hwtstamp_config config;
3858 int err;
3859
3860 if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) ||
3861 (mlx5_clock_get_ptp_index(priv->mdev) == -1))
3862 return -EOPNOTSUPP;
3863
3864 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
3865 return -EFAULT;
3866
3867 /* TX HW timestamp */
3868 switch (config.tx_type) {
3869 case HWTSTAMP_TX_OFF:
3870 case HWTSTAMP_TX_ON:
3871 break;
3872 default:
3873 return -ERANGE;
3874 }
3875
3876 mutex_lock(&priv->state_lock);
3877
3878 switch (config.rx_filter) {
3879 case HWTSTAMP_FILTER_NONE:
3880 /* Reset CQE compression to the admin default */
3881 mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def);
3882 break;
3883 case HWTSTAMP_FILTER_ALL:
3884 case HWTSTAMP_FILTER_SOME:
3885 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3886 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3887 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3888 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3889 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3890 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3891 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
3892 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
3893 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
3894 case HWTSTAMP_FILTER_PTP_V2_EVENT:
3895 case HWTSTAMP_FILTER_PTP_V2_SYNC:
3896 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
3897 case HWTSTAMP_FILTER_NTP_ALL:
3898 /* Disable CQE compression */
3899 netdev_warn(priv->netdev, "Disabling cqe compression");
3900 err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
3901 if (err) {
3902 netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
3903 mutex_unlock(&priv->state_lock);
3904 return err;
3905 }
3906 config.rx_filter = HWTSTAMP_FILTER_ALL;
3907 break;
3908 default:
3909 mutex_unlock(&priv->state_lock);
3910 return -ERANGE;
3911 }
3912
3913 memcpy(&priv->tstamp, &config, sizeof(config));
3914 mutex_unlock(&priv->state_lock);
3915
3916 /* CQE compression may have changed; re-evaluate dependent features (e.g. RX hash) */
3917 netdev_update_features(priv->netdev);
3918
3919 return copy_to_user(ifr->ifr_data, &config,
3920 sizeof(config)) ? -EFAULT : 0;
3921}
3922
3923int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
3924{
3925 struct hwtstamp_config *cfg = &priv->tstamp;
3926
3927 if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
3928 return -EOPNOTSUPP;
3929
3930 return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? -EFAULT : 0;
3931}
3932
3933static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3934{
3935 struct mlx5e_priv *priv = netdev_priv(dev);
3936
3937 switch (cmd) {
3938 case SIOCSHWTSTAMP:
3939 return mlx5e_hwstamp_set(priv, ifr);
3940 case SIOCGHWTSTAMP:
3941 return mlx5e_hwstamp_get(priv, ifr);
3942 default:
3943 return -EOPNOTSUPP;
3944 }
3945}
3946
3947#ifdef CONFIG_MLX5_ESWITCH
3948static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
3949{
3950 struct mlx5e_priv *priv = netdev_priv(dev);
3951 struct mlx5_core_dev *mdev = priv->mdev;
3952
3953 return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
3954}
3955
3956static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
3957 __be16 vlan_proto)
3958{
3959 struct mlx5e_priv *priv = netdev_priv(dev);
3960 struct mlx5_core_dev *mdev = priv->mdev;
3961
3962 if (vlan_proto != htons(ETH_P_8021Q))
3963 return -EPROTONOSUPPORT;
3964
3965 return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
3966 vlan, qos);
3967}
3968
3969static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
3970{
3971 struct mlx5e_priv *priv = netdev_priv(dev);
3972 struct mlx5_core_dev *mdev = priv->mdev;
3973
3974 return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
3975}
3976
3977static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
3978{
3979 struct mlx5e_priv *priv = netdev_priv(dev);
3980 struct mlx5_core_dev *mdev = priv->mdev;
3981
3982 return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
3983}
3984
3985static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
3986 int max_tx_rate)
3987{
3988 struct mlx5e_priv *priv = netdev_priv(dev);
3989 struct mlx5_core_dev *mdev = priv->mdev;
3990
3991 return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
3992 max_tx_rate, min_tx_rate);
3993}
3994
3995static int mlx5_vport_link2ifla(u8 esw_link)
3996{
3997 switch (esw_link) {
3998 case MLX5_VPORT_ADMIN_STATE_DOWN:
3999 return IFLA_VF_LINK_STATE_DISABLE;
4000 case MLX5_VPORT_ADMIN_STATE_UP:
4001 return IFLA_VF_LINK_STATE_ENABLE;
4002 }
4003 return IFLA_VF_LINK_STATE_AUTO;
4004}
4005
4006static int mlx5_ifla_link2vport(u8 ifla_link)
4007{
4008 switch (ifla_link) {
4009 case IFLA_VF_LINK_STATE_DISABLE:
4010 return MLX5_VPORT_ADMIN_STATE_DOWN;
4011 case IFLA_VF_LINK_STATE_ENABLE:
4012 return MLX5_VPORT_ADMIN_STATE_UP;
4013 }
4014 return MLX5_VPORT_ADMIN_STATE_AUTO;
4015}
4016
4017static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
4018 int link_state)
4019{
4020 struct mlx5e_priv *priv = netdev_priv(dev);
4021 struct mlx5_core_dev *mdev = priv->mdev;
4022
4023 return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
4024 mlx5_ifla_link2vport(link_state));
4025}
4026
4027static int mlx5e_get_vf_config(struct net_device *dev,
4028 int vf, struct ifla_vf_info *ivi)
4029{
4030 struct mlx5e_priv *priv = netdev_priv(dev);
4031 struct mlx5_core_dev *mdev = priv->mdev;
4032 int err;
4033
4034 err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
4035 if (err)
4036 return err;
4037 ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
4038 return 0;
4039}
4040
4041static int mlx5e_get_vf_stats(struct net_device *dev,
4042 int vf, struct ifla_vf_stats *vf_stats)
4043{
4044 struct mlx5e_priv *priv = netdev_priv(dev);
4045 struct mlx5_core_dev *mdev = priv->mdev;
4046
4047 return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
4048 vf_stats);
4049}
4050#endif
4051
4052struct mlx5e_vxlan_work {
4053 struct work_struct work;
4054 struct mlx5e_priv *priv;
4055 u16 port;
4056};
4057
4058static void mlx5e_vxlan_add_work(struct work_struct *work)
4059{
4060 struct mlx5e_vxlan_work *vxlan_work =
4061 container_of(work, struct mlx5e_vxlan_work, work);
4062 struct mlx5e_priv *priv = vxlan_work->priv;
4063 u16 port = vxlan_work->port;
4064
4065 mutex_lock(&priv->state_lock);
4066 mlx5_vxlan_add_port(priv->mdev->vxlan, port);
4067 mutex_unlock(&priv->state_lock);
4068
4069 kfree(vxlan_work);
4070}
4071
4072static void mlx5e_vxlan_del_work(struct work_struct *work)
4073{
4074 struct mlx5e_vxlan_work *vxlan_work =
4075 container_of(work, struct mlx5e_vxlan_work, work);
4076 struct mlx5e_priv *priv = vxlan_work->priv;
4077 u16 port = vxlan_work->port;
4078
4079 mutex_lock(&priv->state_lock);
4080 mlx5_vxlan_del_port(priv->mdev->vxlan, port);
4081 mutex_unlock(&priv->state_lock);
4082 kfree(vxlan_work);
4083}
4084
4085static void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, u16 port, int add)
4086{
4087 struct mlx5e_vxlan_work *vxlan_work;
4088
4089 vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC);
4090 if (!vxlan_work)
4091 return;
4092
4093 if (add)
4094 INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_work);
4095 else
4096 INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_work);
4097
4098 vxlan_work->priv = priv;
4099 vxlan_work->port = port;
4100 queue_work(priv->wq, &vxlan_work->work);
4101}
4102
4103static void mlx5e_add_vxlan_port(struct net_device *netdev,
4104 struct udp_tunnel_info *ti)
4105{
4106 struct mlx5e_priv *priv = netdev_priv(netdev);
4107
4108 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
4109 return;
4110
4111 if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
4112 return;
4113
4114 mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 1);
4115}
4116
4117static void mlx5e_del_vxlan_port(struct net_device *netdev,
4118 struct udp_tunnel_info *ti)
4119{
4120 struct mlx5e_priv *priv = netdev_priv(netdev);
4121
4122 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
4123 return;
4124
4125 if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
4126 return;
4127
4128 mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 0);
4129}
4130
4131static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
4132 struct sk_buff *skb,
4133 netdev_features_t features)
4134{
4135 unsigned int offset = 0;
4136 struct udphdr *udph;
4137 u8 proto;
4138 u16 port;
4139
4140 switch (vlan_get_protocol(skb)) {
4141 case htons(ETH_P_IP):
4142 proto = ip_hdr(skb)->protocol;
4143 break;
4144 case htons(ETH_P_IPV6):
4145 proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
4146 break;
4147 default:
4148 goto out;
4149 }
4150
4151 switch (proto) {
4152 case IPPROTO_GRE:
4153 return features;
4154 case IPPROTO_UDP:
4155 udph = udp_hdr(skb);
4156 port = be16_to_cpu(udph->dest);
4157
4158 /* Verify whether the UDP port is being offloaded by HW */
4159 if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port))
4160 return features;
4161 }
4162
4163out:
4164 /* Disable CSUM and GSO if the UDP dport is not offloaded by HW */
4165 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
4166}
4167
static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
					      struct net_device *netdev,
					      netdev_features_t features)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	features = vlan_features_check(skb, features);
	features = vxlan_features_check(skb, features);

#ifdef CONFIG_MLX5_EN_IPSEC
	if (mlx5e_ipsec_feature_check(skb, netdev, features))
		return features;
#endif

	/* Validate if the tunneled packet is being offloaded by HW */
	if (skb->encapsulation &&
	    (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
		return mlx5e_tunnel_features_check(priv, skb, features);

	return features;
}

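/* TX timeout handling: first try to recover missed completions by polling the
 * SQ's completion EQ directly; only if that fails are the channels reopened.
 */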
static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev,
					struct mlx5e_txqsq *sq)
{
	struct mlx5_eq_comp *eq = sq->cq.mcq.eq;
	u32 eqe_count;

	netdev_err(dev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
		   eq->core.eqn, eq->core.cons_index, eq->core.irqn);

	eqe_count = mlx5_eq_poll_irq_disabled(eq);
	if (!eqe_count)
		return false;

	netdev_err(dev, "Recover %d eqes on EQ 0x%x\n", eqe_count, eq->core.eqn);
	sq->channel->stats->eq_rearm++;
	return true;
}

static void mlx5e_tx_timeout_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       tx_timeout_work);
	struct net_device *dev = priv->netdev;
	bool reopen_channels = false;
	int i, err;

	rtnl_lock();
	mutex_lock(&priv->state_lock);

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		goto unlock;

	for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) {
		struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, i);
		struct mlx5e_txqsq *sq = priv->txq2sq[i];

		if (!netif_xmit_stopped(dev_queue))
			continue;

		netdev_err(dev,
			   "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n",
			   i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
			   jiffies_to_usecs(jiffies - dev_queue->trans_start));

		/* If we recover a lost interrupt, most likely TX timeout will
		 * be resolved, skip reopening channels
		 */
		if (!mlx5e_tx_timeout_eq_recover(dev, sq)) {
			clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
			reopen_channels = true;
		}
	}

	if (!reopen_channels)
		goto unlock;

	mlx5e_close_locked(dev);
	err = mlx5e_open_locked(dev);
	if (err)
		netdev_err(priv->netdev,
			   "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
			   err);

unlock:
	mutex_unlock(&priv->state_lock);
	rtnl_unlock();
}

static void mlx5e_tx_timeout(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	netdev_err(dev, "TX timeout detected\n");
	queue_work(priv->wq, &priv->tx_timeout_work);
}

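/* XDP setup: the checks below reject configurations the data path cannot
 * support (LRO, IPSec offload, MTUs that do not fit a linear RX buffer).
 */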
static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5e_channels new_channels = {};

	if (priv->channels.params.lro_en) {
		netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
		return -EINVAL;
	}

	if (MLX5_IPSEC_DEV(priv->mdev)) {
		netdev_warn(netdev, "can't set XDP with IPSec offload\n");
		return -EINVAL;
	}

	new_channels.params = priv->channels.params;
	new_channels.params.xdp_prog = prog;

	if (!mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
		netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n",
			    new_channels.params.sw_mtu, MLX5E_XDP_MAX_MTU);
		return -EINVAL;
	}

	return 0;
}

static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct bpf_prog *old_prog;
	bool reset, was_opened;
	int err = 0;
	int i;

	mutex_lock(&priv->state_lock);

	if (prog) {
		err = mlx5e_xdp_allowed(priv, prog);
		if (err)
			goto unlock;
	}

	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	/* no need for full reset when exchanging programs */
	reset = (!priv->channels.params.xdp_prog || !prog);

	if (was_opened && reset)
		mlx5e_close_locked(netdev);
	if (was_opened && !reset) {
		/* num_channels is invariant here, so we can take the
		 * batched reference right upfront.
		 */
		prog = bpf_prog_add(prog, priv->channels.num);
		if (IS_ERR(prog)) {
			err = PTR_ERR(prog);
			goto unlock;
		}
	}

	/* exchange programs, extra prog reference we got from caller
	 * as long as we don't fail from this point onwards.
	 */
	old_prog = xchg(&priv->channels.params.xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	if (reset) /* change RQ type according to priv->xdp_prog */
		mlx5e_set_rq_type(priv->mdev, &priv->channels.params);

	if (was_opened && reset)
		mlx5e_open_locked(netdev);

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
		goto unlock;

	/* exchanging programs w/o reset, we update ref counts on behalf
	 * of the channels RQs here.
	 */
	for (i = 0; i < priv->channels.num; i++) {
		struct mlx5e_channel *c = priv->channels.c[i];

		clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
		napi_synchronize(&c->napi);
		/* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */

		old_prog = xchg(&c->rq.xdp_prog, prog);

		set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
		/* napi_schedule in case we have missed anything */
		napi_schedule(&c->napi);

		if (old_prog)
			bpf_prog_put(old_prog);
	}

unlock:
	mutex_unlock(&priv->state_lock);
	return err;
}

static u32 mlx5e_xdp_query(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	const struct bpf_prog *xdp_prog;
	u32 prog_id = 0;

	mutex_lock(&priv->state_lock);
	xdp_prog = priv->channels.params.xdp_prog;
	if (xdp_prog)
		prog_id = xdp_prog->aux->id;
	mutex_unlock(&priv->state_lock);

	return prog_id;
}

static int mlx5e_xdp(struct net_device *dev, struct netdev_xdp *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return mlx5e_xdp_set(dev, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_id = mlx5e_xdp_query(dev);
		return 0;
	default:
		return -EINVAL;
	}
}

static int mlx5e_pf_get_phys_port_name(struct net_device *dev,
					char *buf, size_t len)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_eswitch *esw;
	unsigned int fn;
	int ret;

	esw = priv->mdev->priv.eswitch;
	if (!esw || esw->mode != SRIOV_OFFLOADS)
		return -EOPNOTSUPP;

	fn = PCI_FUNC(priv->mdev->pdev->devfn);
	if (fn >= MLX5_MAX_PORTS)
		return -EOPNOTSUPP;

	ret = snprintf(buf, len, "p%d", fn);
	if (ret >= len)
		return -EOPNOTSUPP;

	return 0;
}

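/* Netdev callbacks for the NIC (PF) netdevice */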
const struct net_device_ops mlx5e_netdev_ops = {
	.ndo_size = sizeof(struct net_device_ops),
	.ndo_open = mlx5e_open,
	.ndo_stop = mlx5e_close,
	.ndo_start_xmit = mlx5e_xmit,
	.extended.ndo_get_phys_port_name = mlx5e_pf_get_phys_port_name,
	.extended.ndo_setup_tc_rh = mlx5e_setup_tc,
	.ndo_select_queue = mlx5e_select_queue,
	.ndo_get_stats64 = mlx5e_get_stats,
	.ndo_set_rx_mode = mlx5e_set_rx_mode,
	.ndo_set_mac_address = mlx5e_set_mac,
	.ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
	.ndo_set_features = mlx5e_set_features,
	.ndo_fix_features = mlx5e_fix_features,
	.extended.ndo_change_mtu = mlx5e_change_nic_mtu,
	.ndo_do_ioctl = mlx5e_ioctl,
	.extended.ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
	.extended.ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
	.extended.ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
	.ndo_features_check = mlx5e_features_check,
	.ndo_tx_timeout = mlx5e_tx_timeout,
	.extended.ndo_xdp = mlx5e_xdp,
	.extended.ndo_xdp_xmit = mlx5e_xdp_xmit,
#ifdef CONFIG_MLX5_EN_ARFS
	.ndo_rx_flow_steer = mlx5e_rx_flow_steer,
#endif
#ifdef CONFIG_MLX5_ESWITCH
	/* SRIOV E-Switch NDOs */
	.ndo_set_vf_mac = mlx5e_set_vf_mac,
	.ndo_set_vf_spoofchk = mlx5e_set_vf_spoofchk,
	.extended.ndo_set_vf_trust = mlx5e_set_vf_trust,
	.extended.ndo_set_vf_vlan = mlx5e_set_vf_vlan,
	.ndo_set_vf_rate = mlx5e_set_vf_rate,
	.ndo_get_vf_config = mlx5e_get_vf_config,
	.ndo_set_vf_link_state = mlx5e_set_vf_link_state,
	.ndo_get_vf_stats = mlx5e_get_vf_stats,
	.extended.ndo_has_offload_stats = mlx5e_has_offload_stats,
	.extended.ndo_get_offload_stats = mlx5e_get_offload_stats,
#endif
};

static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;
	if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
	    !MLX5_CAP_GEN(mdev, nic_flow_table) ||
	    !MLX5_CAP_ETH(mdev, csum_cap) ||
	    !MLX5_CAP_ETH(mdev, max_lso_cap) ||
	    !MLX5_CAP_ETH(mdev, vlan_cap) ||
	    !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
	    MLX5_CAP_FLOWTABLE(mdev,
			       flow_table_properties_nic_receive.max_ft_level)
			       < 3) {
		mlx5_core_warn(mdev,
			       "Not creating net device, some required device capabilities are missing\n");
		return -EOPNOTSUPP;
	}
	if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
		mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
	if (!MLX5_CAP_GEN(mdev, cq_moderation))
		mlx5_core_warn(mdev, "CQ moderation is not supported\n");

	return 0;
}

void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
				   int num_channels)
{
	int i;

	for (i = 0; i < len; i++)
		indirection_rqt[i] = i % num_channels;
}

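/* The PCI link is considered a bottleneck when the port's maximum link speed
 * exceeds the available PCI bandwidth by more than MLX5E_SLOW_PCI_RATIO.
 */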
static bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
{
	u32 link_speed = 0;
	u32 pci_bw = 0;

	mlx5e_port_max_linkspeed(mdev, &link_speed);
	pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
	mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
			   link_speed, pci_bw);

#define MLX5E_SLOW_PCI_RATIO (2)

	return link_speed && pci_bw &&
	       link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
}

static struct net_dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
{
	struct net_dim_cq_moder moder;

	moder.cq_period_mode = cq_period_mode;
	moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
	moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;

	return moder;
}

static struct net_dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
{
	struct net_dim_cq_moder moder;

	moder.cq_period_mode = cq_period_mode;
	moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
	moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;

	return moder;
}

static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
{
	return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ?
		NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE :
		NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}

void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	if (params->tx_dim_enabled) {
		u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);

		params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode);
	} else {
		params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
	}

	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
			params->tx_cq_moderation.cq_period_mode ==
				MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}

void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	if (params->rx_dim_enabled) {
		u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);

		params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode);
	} else {
		params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
	}

	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
			params->rx_cq_moderation.cq_period_mode ==
				MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}

static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
{
	int i;

	/* The supported periods are organized in ascending order */
	for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
		if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
			break;

	return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
}

void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
			   struct mlx5e_params *params)
{
	/* Prefer Striding RQ, unless any of the following holds:
	 * - Striding RQ configuration is not possible/supported.
	 * - Slow PCI heuristic.
	 * - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
	 */
	if (!slow_pci_heuristic(mdev) &&
	    mlx5e_striding_rq_possible(mdev, params) &&
	    (mlx5e_rx_mpwqe_is_linear_skb(mdev, params) ||
	     !mlx5e_rx_is_linear_skb(mdev, params)))
		MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
	mlx5e_set_rq_type(mdev, params);
	mlx5e_init_rq_type_params(mdev, params);
}

void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
			    u16 num_channels)
{
	rss_params->hfunc = ETH_RSS_HASH_TOP;
	netdev_rss_key_fill(rss_params->toeplitz_hash_key,
			    sizeof(rss_params->toeplitz_hash_key));
	mlx5e_build_default_indir_rqt(rss_params->indirection_rqt,
				      MLX5E_INDIR_RQT_SIZE, num_channels);
}

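/* Derive the default netdev parameters (queue sizes, RQ type, LRO, CQ
 * moderation, RSS) from the device capabilities and the requested MTU.
 */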
void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
			    struct mlx5e_rss_params *rss_params,
			    struct mlx5e_params *params,
			    u16 max_channels, u16 mtu)
{
	u8 rx_cq_period_mode;

	params->sw_mtu = mtu;
	params->hard_mtu = MLX5E_ETH_HARD_MTU;
	params->num_channels = max_channels;
	params->num_tc = 1;

	/* SQ */
	params->log_sq_size = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
		MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;

	/* set CQE compression */
	params->rx_cqe_compress_def = false;
	if (MLX5_CAP_GEN(mdev, cqe_compression) &&
	    MLX5_CAP_GEN(mdev, vport_group_manager))
		params->rx_cqe_compress_def = slow_pci_heuristic(mdev);

	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, false);

	/* RQ */
	mlx5e_build_rq_params(mdev, params);

	/* HW LRO */
	if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
		if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
			params->lro_en = !slow_pci_heuristic(mdev);
	params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);

	/* CQ moderation params */
	rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
			MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
			MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
	params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode);
	mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);

	/* TX inline */
	params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev);

	/* RSS */
	mlx5e_build_rss_params(rss_params, params->num_channels);
	params->tunneled_offload_en =
		mlx5e_tunnel_inner_ft_supported(mdev);
}

static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
	if (is_zero_ether_addr(netdev->dev_addr) &&
	    !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
		eth_hw_addr_random(netdev);
		mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
	}
}

#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
static const struct switchdev_ops mlx5e_switchdev_ops = {
	.switchdev_port_attr_get = mlx5e_attr_get,
};
#endif

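/* Set up the netdev: ops, ethtool, and the feature/offload flags advertised
 * according to what the device capabilities report.
 */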
static void mlx5e_build_nic_netdev(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	bool fcs_supported;
	bool fcs_enabled;

	SET_NETDEV_DEV(netdev, &mdev->pdev->dev);

	netdev->netdev_ops = &mlx5e_netdev_ops;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos))
		netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
	netdev->dcbnl_ops_ext = &mlx5e_dcbnl_ops_ext;
#endif

	netdev->watchdog_timeo = 15 * HZ;

	netdev->ethtool_ops = &mlx5e_ethtool_ops;

	netdev->vlan_features |= NETIF_F_SG;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_GRO;
	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_RXCSUM;
	netdev->vlan_features |= NETIF_F_RXHASH;

	netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_TX;
	netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_RX;

	if (!!MLX5_CAP_ETH(mdev, lro_cap) &&
	    mlx5e_check_fragmented_striding_rq_cap(mdev))
		netdev->vlan_features |= NETIF_F_LRO;

	netdev->hw_features = netdev->vlan_features;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;

	if (mlx5_vxlan_allowed(mdev->vxlan) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
		netdev->hw_enc_features |= NETIF_F_IP_CSUM;
		netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
		netdev->hw_enc_features |= NETIF_F_TSO;
		netdev->hw_enc_features |= NETIF_F_TSO6;
		netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL;
	}

	if (mlx5_vxlan_allowed(mdev->vxlan)) {
		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
				       NETIF_F_GSO_UDP_TUNNEL_CSUM;
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
					   NETIF_F_GSO_UDP_TUNNEL_CSUM;
		netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
		netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL |
					 NETIF_F_GSO_UDP_TUNNEL_CSUM;
	}

	if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
		netdev->hw_features |= NETIF_F_GSO_GRE |
				       NETIF_F_GSO_GRE_CSUM;
		netdev->hw_enc_features |= NETIF_F_GSO_GRE |
					   NETIF_F_GSO_GRE_CSUM;
		netdev->gso_partial_features |= NETIF_F_GSO_GRE |
						NETIF_F_GSO_GRE_CSUM;
	}

	netdev->hw_features |= NETIF_F_GSO_PARTIAL;
	netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4;
	netdev->hw_features |= NETIF_F_GSO_UDP_L4;
	netdev->features |= NETIF_F_GSO_UDP_L4;

	mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);

	if (fcs_supported)
		netdev->hw_features |= NETIF_F_RXALL;

	if (MLX5_CAP_ETH(mdev, scatter_fcs))
		netdev->hw_features |= NETIF_F_RXFCS;

	netdev->features = netdev->hw_features;
	if (!priv->channels.params.lro_en)
		netdev->features &= ~NETIF_F_LRO;

	if (fcs_enabled)
		netdev->features &= ~NETIF_F_RXALL;

	if (!priv->channels.params.scatter_fcs_en)
		netdev->features &= ~NETIF_F_RXFCS;

	/* prefer CQE compression over rxhash */
	if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
		netdev->features &= ~NETIF_F_RXHASH;

#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
	if (FT_CAP(flow_modify_en) &&
	    FT_CAP(modify_root) &&
	    FT_CAP(identified_miss_table_mode) &&
	    FT_CAP(flow_table_modify)) {
#ifdef CONFIG_MLX5_ESWITCH
		netdev->hw_features |= NETIF_F_HW_TC;
#endif
#ifdef CONFIG_MLX5_EN_ARFS
		netdev->hw_features |= NETIF_F_NTUPLE;
#endif
	}

	netdev->features |= NETIF_F_HIGHDMA;
	netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	mlx5e_set_netdev_dev_addr(netdev);

#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
	if (MLX5_ESWITCH_MANAGER(mdev))
		netdev->switchdev_ops = &mlx5e_switchdev_ops;
#endif

	mlx5e_ipsec_build_netdev(priv);
	mlx5e_tls_build_netdev(priv);
}

void mlx5e_create_q_counters(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
	if (err) {
		mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
		priv->q_counter = 0;
	}

	err = mlx5_core_alloc_q_counter(mdev, &priv->drop_rq_q_counter);
	if (err) {
		mlx5_core_warn(mdev, "alloc drop RQ counter failed, %d\n", err);
		priv->drop_rq_q_counter = 0;
	}
}

void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
{
	if (priv->q_counter)
		mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);

	if (priv->drop_rq_q_counter)
		mlx5_core_dealloc_q_counter(priv->mdev, priv->drop_rq_q_counter);
}

static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
			  struct net_device *netdev,
			  const struct mlx5e_profile *profile,
			  void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rss_params *rss = &priv->rss_params;
	int err;

	err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
	if (err)
		return err;

	mlx5e_build_nic_params(mdev, rss, &priv->channels.params,
			       mlx5e_get_netdev_max_channels(netdev),
			       netdev->mtu);

	mlx5e_timestamp_init(priv);

	err = mlx5e_ipsec_init(priv);
	if (err)
		mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
	err = mlx5e_tls_init(priv);
	if (err)
		mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
	mlx5e_build_nic_netdev(netdev);
	mlx5e_build_tc2txq_maps(priv);

	return 0;
}

static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
	mlx5e_tls_cleanup(priv);
	mlx5e_ipsec_cleanup(priv);
	mlx5e_netdev_cleanup(priv->netdev, priv);

	if (priv->channels.params.xdp_prog)
		bpf_prog_put(priv->channels.params.xdp_prog);
}

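/* Profile .init_rx: create the drop RQ, RQTs, TIRs and the flow steering
 * tables; on failure everything created so far is torn down in reverse order.
 */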
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	mlx5e_create_q_counters(priv);

	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
	if (err) {
		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
		goto err_destroy_q_counters;
	}

	err = mlx5e_create_indirect_rqt(priv);
	if (err)
		goto err_close_drop_rq;

	err = mlx5e_create_direct_rqts(priv);
	if (err)
		goto err_destroy_indirect_rqts;

	err = mlx5e_create_indirect_tirs(priv, true);
	if (err)
		goto err_destroy_direct_rqts;

	err = mlx5e_create_direct_tirs(priv);
	if (err)
		goto err_destroy_indirect_tirs;

	err = mlx5e_create_flow_steering(priv);
	if (err) {
		mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
		goto err_destroy_direct_tirs;
	}

	err = mlx5e_tc_nic_init(priv);
	if (err)
		goto err_destroy_flow_steering;

	return 0;

err_destroy_flow_steering:
	mlx5e_destroy_flow_steering(priv);
err_destroy_direct_tirs:
	mlx5e_destroy_direct_tirs(priv);
err_destroy_indirect_tirs:
	mlx5e_destroy_indirect_tirs(priv, true);
err_destroy_direct_rqts:
	mlx5e_destroy_direct_rqts(priv);
err_destroy_indirect_rqts:
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
err_close_drop_rq:
	mlx5e_close_drop_rq(&priv->drop_rq);
err_destroy_q_counters:
	mlx5e_destroy_q_counters(priv);
	return err;
}

static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
	mlx5e_tc_nic_cleanup(priv);
	mlx5e_destroy_flow_steering(priv);
	mlx5e_destroy_direct_tirs(priv);
	mlx5e_destroy_indirect_tirs(priv, true);
	mlx5e_destroy_direct_rqts(priv);
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
	mlx5e_close_drop_rq(&priv->drop_rq);
	mlx5e_destroy_q_counters(priv);
}

static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
		return err;
	}

#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_initialize(priv);
#endif
	return 0;
}

static void mlx5e_nic_enable(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 max_mtu;

	mlx5e_init_l2_addr(priv);

	/* Marking the link as currently not needed by the Driver */
	if (!netif_running(netdev))
		mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);

	/* MTU range: 68 - hw-specific max */
	netdev->extended->min_mtu = ETH_MIN_MTU;
	mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
	netdev->extended->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
	mlx5e_set_dev_port_mtu(priv);

	mlx5_lag_add(mdev, netdev);

	mlx5e_enable_async_events(priv);

	if (MLX5_ESWITCH_MANAGER(priv->mdev))
		mlx5e_register_vport_reps(priv);

	if (netdev->reg_state != NETREG_REGISTERED)
		return;
#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_init_app(priv);
#endif

	queue_work(priv->wq, &priv->set_rx_mode_work);

	rtnl_lock();
	if (netif_running(netdev))
		mlx5e_open(netdev);
	netif_device_attach(netdev);
	rtnl_unlock();
}

static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->netdev->reg_state == NETREG_REGISTERED)
		mlx5e_dcbnl_delete_app(priv);
#endif

	rtnl_lock();
	if (netif_running(priv->netdev))
		mlx5e_close(priv->netdev);
	netif_device_detach(priv->netdev);
	rtnl_unlock();

	queue_work(priv->wq, &priv->set_rx_mode_work);

	if (MLX5_ESWITCH_MANAGER(priv->mdev))
		mlx5e_unregister_vport_reps(priv);

	mlx5e_disable_async_events(priv);
	mlx5_lag_remove(mdev);
}

static const struct mlx5e_profile mlx5e_nic_profile = {
	.init = mlx5e_nic_init,
	.cleanup = mlx5e_nic_cleanup,
	.init_rx = mlx5e_init_nic_rx,
	.cleanup_rx = mlx5e_cleanup_nic_rx,
	.init_tx = mlx5e_init_nic_tx,
	.cleanup_tx = mlx5e_cleanup_nic_tx,
	.enable = mlx5e_nic_enable,
	.disable = mlx5e_nic_disable,
	.update_stats = mlx5e_update_ndo_stats,
	.update_carrier = mlx5e_update_carrier,
	.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe,
	.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
	.max_tc = MLX5E_MAX_NUM_TC,
};

/* mlx5e generic netdev management API */
int mlx5e_netdev_init(struct net_device *netdev,
		      struct mlx5e_priv *priv,
		      struct mlx5_core_dev *mdev,
		      const struct mlx5e_profile *profile,
		      void *ppriv)
{
	/* priv init */
	priv->mdev = mdev;
	priv->netdev = netdev;
	priv->profile = profile;
	priv->ppriv = ppriv;
	priv->msglevel = MLX5E_MSG_LEVEL;
	priv->max_opened_tc = 1;

	mutex_init(&priv->state_lock);
	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
	INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
	INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);

	priv->wq = create_singlethread_workqueue("mlx5e");
	if (!priv->wq)
		return -ENOMEM;

	/* netdev init */
	netif_carrier_off(netdev);

#ifdef CONFIG_MLX5_EN_ARFS
	netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(mdev);
#endif

	return 0;
}

void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv)
{
	destroy_workqueue(priv->wq);
}

struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
				       const struct mlx5e_profile *profile,
				       int nch,
				       void *ppriv)
{
	struct net_device *netdev;
	int err;

	netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
				    nch * profile->max_tc,
				    nch);
	if (!netdev) {
		mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
		return NULL;
	}

	err = profile->init(mdev, netdev, profile, ppriv);
	if (err) {
		mlx5_core_err(mdev, "failed to init mlx5e profile %d\n", err);
		goto err_free_netdev;
	}

	return netdev;

err_free_netdev:
	free_netdev(netdev);

	return NULL;
}

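/* Attach the profile to the netdev: clamp the channel count to what the
 * device currently supports, then create the TX and RX resources and enable.
 */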
int mlx5e_attach_netdev(struct mlx5e_priv *priv)
{
	const struct mlx5e_profile *profile;
	int max_nch;
	int err;

	profile = priv->profile;
	clear_bit(MLX5E_STATE_DESTROYING, &priv->state);

	/* max number of channels may have changed */
	max_nch = mlx5e_get_max_num_channels(priv->mdev);
	if (priv->channels.params.num_channels > max_nch) {
		mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
		priv->channels.params.num_channels = max_nch;
		mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
					      MLX5E_INDIR_RQT_SIZE, max_nch);
	}

	err = profile->init_tx(priv);
	if (err)
		goto out;

	err = profile->init_rx(priv);
	if (err)
		goto err_cleanup_tx;

	if (profile->enable)
		profile->enable(priv);

	return 0;

err_cleanup_tx:
	profile->cleanup_tx(priv);

out:
	return err;
}

void mlx5e_detach_netdev(struct mlx5e_priv *priv)
{
	const struct mlx5e_profile *profile = priv->profile;

	set_bit(MLX5E_STATE_DESTROYING, &priv->state);

	if (profile->disable)
		profile->disable(priv);
	flush_workqueue(priv->wq);

	profile->cleanup_rx(priv);
	profile->cleanup_tx(priv);
	cancel_delayed_work_sync(&priv->update_stats_work);
}

void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
{
	const struct mlx5e_profile *profile = priv->profile;
	struct net_device *netdev = priv->netdev;

	if (profile->cleanup)
		profile->cleanup(priv);
	free_netdev(netdev);
}

/* mlx5_core interface callbacks: attach/detach (re)create the mdev resources
 * and hook the netdev profile up to them.
 */
static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	struct net_device *netdev = priv->netdev;
	int err;

	if (netif_device_present(netdev))
		return 0;

	err = mlx5e_create_mdev_resources(mdev);
	if (err)
		return err;

	err = mlx5e_attach_netdev(priv);
	if (err) {
		mlx5e_destroy_mdev_resources(mdev);
		return err;
	}

	return 0;
}

static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	struct net_device *netdev = priv->netdev;

	if (!netif_device_present(netdev))
		return;

	mlx5e_detach_netdev(priv);
	mlx5e_destroy_mdev_resources(mdev);
}

static void *mlx5e_add(struct mlx5_core_dev *mdev)
{
	struct net_device *netdev;
	void *rpriv = NULL;
	void *priv;
	int err;
	int nch;

	err = mlx5e_check_required_hca_cap(mdev);
	if (err)
		return NULL;

#ifdef CONFIG_MLX5_ESWITCH
	if (MLX5_ESWITCH_MANAGER(mdev)) {
		rpriv = mlx5e_alloc_nic_rep_priv(mdev);
		if (!rpriv) {
			mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n");
			return NULL;
		}
	}
#endif

	nch = mlx5e_get_max_num_channels(mdev);
	netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, nch, rpriv);
	if (!netdev) {
		mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
		goto err_free_rpriv;
	}

	priv = netdev_priv(netdev);

	err = mlx5e_attach(mdev, priv);
	if (err) {
		mlx5_core_err(mdev, "mlx5e_attach failed, %d\n", err);
		goto err_destroy_netdev;
	}

	err = register_netdev(netdev);
	if (err) {
		mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
		goto err_detach;
	}

#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_init_app(priv);
#endif
	return priv;

err_detach:
	mlx5e_detach(mdev, priv);
err_destroy_netdev:
	mlx5e_destroy_netdev(priv);
err_free_rpriv:
	kfree(rpriv);
	return NULL;
}

static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	void *ppriv = priv->ppriv;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_delete_app(priv);
#endif
	unregister_netdev(priv->netdev);
	mlx5e_detach(mdev, vpriv);
	mlx5e_destroy_netdev(priv);
	kfree(ppriv);
}

static void *mlx5e_get_netdev(void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;

	return priv->netdev;
}

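/* Register the Ethernet protocol handler with the mlx5 core driver; the core
 * invokes these callbacks as devices are probed, attached and removed.
 */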
static struct mlx5_interface mlx5e_interface = {
	.add = mlx5e_add,
	.remove = mlx5e_remove,
	.attach = mlx5e_attach,
	.detach = mlx5e_detach,
	.event = mlx5e_async_event,
	.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
	.get_dev = mlx5e_get_netdev,
};

void mlx5e_init(void)
{
	mlx5e_ipsec_build_inverse_table();
	mlx5e_build_ptys2ethtool_map();
	mlx5_register_interface(&mlx5e_interface);
}

void mlx5e_cleanup(void)
{
	mlx5_unregister_interface(&mlx5e_interface);
}