#include <generated/utsrelease.h>
#include <linux/mlx5/fs.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/netevent.h>
#include <net/arp.h>

#include "eswitch.h"
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "fs_core.h"

#define MLX5E_REP_PARAMS_LOG_SQ_SIZE \
        max(0x6, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
#define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1

static const char mlx5e_rep_driver_name[] = "mlx5e_rep";

static void mlx5e_rep_get_drvinfo(struct net_device *dev,
                                  struct ethtool_drvinfo *drvinfo)
{
        strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
                sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}

static const struct counter_desc sw_rep_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
};

struct vport_stats {
        u64 vport_rx_packets;
        u64 vport_tx_packets;
        u64 vport_rx_bytes;
        u64 vport_tx_bytes;
};

static const struct counter_desc vport_rep_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_packets) },
        { MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_bytes) },
        { MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_packets) },
        { MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_bytes) },
};

#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
#define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)

static void mlx5e_rep_get_strings(struct net_device *dev,
                                  u32 stringset, uint8_t *data)
{
        int i, j;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
                        strcpy(data + (i * ETH_GSTRING_LEN),
                               sw_rep_stats_desc[i].format);
                for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
                        strcpy(data + (i * ETH_GSTRING_LEN),
                               vport_rep_stats_desc[j].format);
                break;
        }
}

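/* The representor's "HW" counters are the e-switch vport counters of the
 * VF it represents, read from the FDB side. rx/tx are therefore swapped
 * below so the numbers are reported from the representor netdev's point
 * of view: what the VF transmitted is what the representor received.
 */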
static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct rtnl_link_stats64 *vport_stats;
        struct ifla_vf_stats vf_stats;
        int err;

        err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
        if (err) {
                pr_warn("vport %d error %d reading stats\n", rep->vport, err);
                return;
        }

        vport_stats = &priv->stats.vf_vport;

        vport_stats->rx_packets = vf_stats.tx_packets;
        vport_stats->rx_bytes   = vf_stats.tx_bytes;
        vport_stats->tx_packets = vf_stats.rx_packets;
        vport_stats->tx_bytes   = vf_stats.rx_bytes;
}

static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
{
        struct mlx5e_sw_stats *s = &priv->stats.sw;
        struct mlx5e_rq_stats *rq_stats;
        struct mlx5e_sq_stats *sq_stats;
        int i, j;

        memset(s, 0, sizeof(*s));
        for (i = 0; i < priv->channels.num; i++) {
                struct mlx5e_channel *c = priv->channels.c[i];

                rq_stats = c->rq.stats;

                s->rx_packets += rq_stats->packets;
                s->rx_bytes   += rq_stats->bytes;

                for (j = 0; j < priv->channels.params.num_tc; j++) {
                        sq_stats = c->sq[j].stats;

                        s->tx_packets += sq_stats->packets;
                        s->tx_bytes   += sq_stats->bytes;
                }
        }
}

static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
                                        struct ethtool_stats *stats, u64 *data)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        int i, j;

        if (!data)
                return;

        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state))
                mlx5e_rep_update_sw_counters(priv);
        mlx5e_rep_update_hw_counters(priv);
        mutex_unlock(&priv->state_lock);

        for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
                data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
                                               sw_rep_stats_desc, i);

        for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
                data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
                                               vport_rep_stats_desc, j);
}

static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return NUM_VPORT_REP_SW_COUNTERS + NUM_VPORT_REP_HW_COUNTERS;
        default:
                return -EOPNOTSUPP;
        }
}

static void mlx5e_rep_get_ringparam(struct net_device *dev,
                                    struct ethtool_ringparam *param)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        mlx5e_ethtool_get_ringparam(priv, param);
}

static int mlx5e_rep_set_ringparam(struct net_device *dev,
                                   struct ethtool_ringparam *param)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        return mlx5e_ethtool_set_ringparam(priv, param);
}

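/* Replace the representor's vport rx rule make-before-break: the new rule
 * is created first and the old one is deleted only on success, so matching
 * traffic is never left without a steering destination.
 */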
static int mlx5e_replace_rep_vport_rx_rule(struct mlx5e_priv *priv,
                                           struct mlx5_flow_destination *dest)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5_flow_handle *flow_rule;

        flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
                                                      rep->vport,
                                                      dest);
        if (IS_ERR(flow_rule))
                return PTR_ERR(flow_rule);

        mlx5_del_flow_rules(rpriv->vport_rx_rule);
        rpriv->vport_rx_rule = flow_rule;
        return 0;
}

static void mlx5e_rep_get_channels(struct net_device *dev,
                                   struct ethtool_channels *ch)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        mlx5e_ethtool_get_channels(priv, ch);
}

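/* When the channel count crosses the single-channel boundary, the vport
 * rx rule destination must follow: with one channel, traffic can go
 * straight to the only direct TIR, while with multiple channels it must
 * pass through the TTC flow table so it can be RSS-spread.
 */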
static int mlx5e_rep_set_channels(struct net_device *dev,
                                  struct ethtool_channels *ch)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        u16 curr_channels_amount = priv->channels.params.num_channels;
        u32 new_channels_amount = ch->combined_count;
        struct mlx5_flow_destination new_dest;
        int err = 0;

        err = mlx5e_ethtool_set_channels(priv, ch);
        if (err)
                return err;

        if (curr_channels_amount == 1 && new_channels_amount > 1) {
                new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
                new_dest.ft = priv->fs.ttc.ft.t;
        } else if (new_channels_amount == 1 && curr_channels_amount > 1) {
                new_dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
                new_dest.tir_num = priv->direct_tir[0].tirn;
        } else {
                return 0;
        }

        err = mlx5e_replace_rep_vport_rx_rule(priv, &new_dest);
        if (err) {
                netdev_warn(priv->netdev, "Failed to update vport rx rule when going from (%d) to (%d) channels\n",
                            curr_channels_amount, new_channels_amount);
                return err;
        }

        return 0;
}

static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        return mlx5e_ethtool_get_rxfh_key_size(priv);
}

static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        return mlx5e_ethtool_get_rxfh_indir_size(priv);
}

static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
        .get_drvinfo         = mlx5e_rep_get_drvinfo,
        .get_link            = ethtool_op_get_link,
        .get_strings         = mlx5e_rep_get_strings,
        .get_sset_count      = mlx5e_rep_get_sset_count,
        .get_ethtool_stats   = mlx5e_rep_get_ethtool_stats,
        .get_ringparam       = mlx5e_rep_get_ringparam,
        .set_ringparam       = mlx5e_rep_set_ringparam,
        .get_channels        = mlx5e_rep_get_channels,
        .set_channels        = mlx5e_rep_set_channels,
        .get_rxfh_key_size   = mlx5e_rep_get_rxfh_key_size,
        .get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
};

int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

        if (esw->mode == SRIOV_NONE)
                return -EOPNOTSUPP;

        switch (attr->id) {
        case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
                attr->u.ppid.id_len = ETH_ALEN;
                ether_addr_copy(attr->u.ppid.id, rep->hw_id);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
                                 struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_rep_sq *rep_sq, *tmp;
        struct mlx5e_rep_priv *rpriv;

        if (esw->mode != SRIOV_OFFLOADS)
                return;

        rpriv = mlx5e_rep_to_rep_priv(rep);
        list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
                mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
                list_del(&rep_sq->list);
                kfree(rep_sq);
        }
}

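/* Install one e-switch "send to vport" rule per representor SQ, so that
 * packets transmitted on the representor's send queues bypass regular FDB
 * matching and are forwarded directly to the vport being represented.
 */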
static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
                                 struct mlx5_eswitch_rep *rep,
                                 u32 *sqns_array, int sqns_num)
{
        struct mlx5_flow_handle *flow_rule;
        struct mlx5e_rep_priv *rpriv;
        struct mlx5e_rep_sq *rep_sq;
        int err;
        int i;

        if (esw->mode != SRIOV_OFFLOADS)
                return 0;

        rpriv = mlx5e_rep_to_rep_priv(rep);
        for (i = 0; i < sqns_num; i++) {
                rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
                if (!rep_sq) {
                        err = -ENOMEM;
                        goto out_err;
                }

                flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
                                                                rep->vport,
                                                                sqns_array[i]);
                if (IS_ERR(flow_rule)) {
                        err = PTR_ERR(flow_rule);
                        kfree(rep_sq);
                        goto out_err;
                }
                rep_sq->send_to_vport_rule = flow_rule;
                list_add(&rep_sq->list, &rpriv->vport_sqs_list);
        }
        return 0;

out_err:
        mlx5e_sqs2vport_stop(esw, rep);
        return err;
}

int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5e_channel *c;
        int n, tc, num_sqs = 0;
        int err = -ENOMEM;
        u32 *sqs;

        sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc,
                      sizeof(*sqs), GFP_KERNEL);
        if (!sqs)
                goto out;

        for (n = 0; n < priv->channels.num; n++) {
                c = priv->channels.c[n];
                for (tc = 0; tc < c->num_tc; tc++)
                        sqs[num_sqs++] = c->sq[tc].sqn;
        }

        err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
        kfree(sqs);

out:
        if (err)
                netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err);
        return err;
}

void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;

        mlx5e_sqs2vport_stop(esw, rep);
}

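/* The flow-counter sampling interval is derived from the kernel's
 * DELAY_PROBE_TIME neigh parameter (minimum of the IPv4 and IPv6 tables),
 * so counters backing offloaded tunnel flows are polled often enough to
 * keep the neighbour used-value feedback timely.
 */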
static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
{
#if IS_ENABLED(CONFIG_IPV6)
        unsigned long ipv6_interval = NEIGH_VAR(&nd_tbl.parms,
                                                DELAY_PROBE_TIME);
#else
        unsigned long ipv6_interval = ~0UL;
#endif
        unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms,
                                                DELAY_PROBE_TIME);
        struct net_device *netdev = rpriv->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);

        rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval);
        mlx5_fc_update_sampling_interval(priv->mdev, rpriv->neigh_update.min_interval);
}

void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;

        mlx5_fc_queue_stats_work(priv->mdev,
                                 &neigh_update->neigh_stats_work,
                                 neigh_update->min_interval);
}

static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
{
        struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
                                                    neigh_update.neigh_stats_work.work);
        struct net_device *netdev = rpriv->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_neigh_hash_entry *nhe;

        rtnl_lock();
        if (!list_empty(&rpriv->neigh_update.neigh_list))
                mlx5e_rep_queue_neigh_stats_work(priv);

        list_for_each_entry(nhe, &rpriv->neigh_update.neigh_list, neigh_list)
                mlx5e_tc_update_neigh_used_value(nhe);

        rtnl_unlock();
}

static void mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe)
{
        refcount_inc(&nhe->refcnt);
}

static void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe)
{
        if (refcount_dec_and_test(&nhe->refcnt))
                kfree(nhe);
}

static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
                                   struct mlx5e_encap_entry *e,
                                   bool neigh_connected,
                                   unsigned char ha[ETH_ALEN])
{
        struct ethhdr *eth = (struct ethhdr *)e->encap_header;

        ASSERT_RTNL();

        if ((e->flags & MLX5_ENCAP_ENTRY_VALID) &&
            (!neigh_connected || !ether_addr_equal(e->h_dest, ha)))
                mlx5e_tc_encap_flows_del(priv, e);

        if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
                ether_addr_copy(e->h_dest, ha);
                ether_addr_copy(eth->h_dest, ha);

                mlx5e_tc_encap_flows_add(priv, e);
        }
}

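/* Deferred handler for a neighbour state change, run from the driver
 * workqueue under RTNL: tear down offloaded encap flows when the neighbour
 * became invalid or changed its hardware address, and (re)offload them
 * when it became valid.
 */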
static void mlx5e_rep_neigh_update(struct work_struct *work)
{
        struct mlx5e_neigh_hash_entry *nhe =
                container_of(work, struct mlx5e_neigh_hash_entry, neigh_update_work);
        struct neighbour *n = nhe->n;
        struct mlx5e_encap_entry *e;
        unsigned char ha[ETH_ALEN];
        struct mlx5e_priv *priv;
        bool neigh_connected;
        bool encap_connected;
        u8 nud_state, dead;

        rtnl_lock();

        /* Sample the neighbour validity and its hardware address together
         * under the neigh lock so they can't be seen in an inconsistent
         * mix; if they change again after the unlock, another netevent
         * will requeue this work.
         */
        read_lock_bh(&n->lock);
        memcpy(ha, n->ha, ETH_ALEN);
        nud_state = n->nud_state;
        dead = n->dead;
        read_unlock_bh(&n->lock);

        neigh_connected = (nud_state & NUD_VALID) && !dead;

        list_for_each_entry(e, &nhe->encap_list, encap_list) {
                encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
                priv = netdev_priv(e->out_dev);

                if (encap_connected != neigh_connected ||
                    !ether_addr_equal(e->h_dest, ha))
                        mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
        }
        mlx5e_rep_neigh_entry_release(nhe);
        rtnl_unlock();
        neigh_release(n);
}

static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
                             struct mlx5e_neigh *m_neigh);

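/* netevent notifier: translates kernel neighbour events into work items.
 * It can run in atomic (softirq) context, so it may only take the encap
 * spin lock and must defer the actual flow updates to priv->wq.
 */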
static int mlx5e_rep_netevent_event(struct notifier_block *nb,
                                    unsigned long event, void *ptr)
{
        struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
                                                    neigh_update.netevent_nb);
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
        struct net_device *netdev = rpriv->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_neigh_hash_entry *nhe = NULL;
        struct mlx5e_neigh m_neigh = {};
        struct neigh_parms *p;
        struct neighbour *n;
        bool found = false;

        switch (event) {
        case NETEVENT_NEIGH_UPDATE:
                n = ptr;
#if IS_ENABLED(CONFIG_IPV6)
                if (n->tbl != &nd_tbl && n->tbl != &arp_tbl)
#else
                if (n->tbl != &arp_tbl)
#endif
                        return NOTIFY_DONE;

                m_neigh.dev = n->dev;
                m_neigh.family = n->ops->family;
                memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len);

                /* We are in atomic context and can't take the RTNL mutex,
                 * so use the encap spin lock for the lookup; the bh variant
                 * is needed since netevent can be called from softirq.
                 */
                spin_lock_bh(&neigh_update->encap_lock);
                nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh);
                if (!nhe) {
                        spin_unlock_bh(&neigh_update->encap_lock);
                        return NOTIFY_DONE;
                }

                /* This assignment is valid as long as the neighbour
                 * reference taken below is held.
                 */
                nhe->n = n;

                /* Take references so neither the neighbour nor the hash
                 * entry can be destroyed before the update work has run
                 * and dropped them.
                 */
                neigh_hold(n);
                mlx5e_rep_neigh_entry_hold(nhe);

                if (!queue_work(priv->wq, &nhe->neigh_update_work)) {
                        mlx5e_rep_neigh_entry_release(nhe);
                        neigh_release(n);
                }
                spin_unlock_bh(&neigh_update->encap_lock);
                break;

        case NETEVENT_DELAY_PROBE_TIME_UPDATE:
                p = ptr;

                /* Only per-device parameter changes are of interest;
                 * updates to the default table parameters (p->dev == NULL)
                 * don't affect any tracked neighbour.
                 */
#if IS_ENABLED(CONFIG_IPV6)
                if (!p->dev || (p->tbl != &nd_tbl && p->tbl != &arp_tbl))
#else
                if (!p->dev || p->tbl != &arp_tbl)
#endif
                        return NOTIFY_DONE;

                /* Walk the neigh list under the encap spin lock (RTNL
                 * can't be taken here) and check whether any tracked
                 * neighbour lives on the device whose parameters changed.
                 */
                spin_lock_bh(&neigh_update->encap_lock);
                list_for_each_entry(nhe, &neigh_update->neigh_list, neigh_list) {
                        if (p->dev == nhe->m_neigh.dev) {
                                found = true;
                                break;
                        }
                }
                spin_unlock_bh(&neigh_update->encap_lock);
                if (!found)
                        return NOTIFY_DONE;

                neigh_update->min_interval = min_t(unsigned long,
                                                   NEIGH_VAR(p, DELAY_PROBE_TIME),
                                                   neigh_update->min_interval);
                mlx5_fc_update_sampling_interval(priv->mdev,
                                                 neigh_update->min_interval);
                break;
        }
        return NOTIFY_DONE;
}

static const struct rhashtable_params mlx5e_neigh_ht_params = {
        .head_offset = offsetof(struct mlx5e_neigh_hash_entry, rhash_node),
        .key_offset = offsetof(struct mlx5e_neigh_hash_entry, m_neigh),
        .key_len = sizeof(struct mlx5e_neigh),
        .automatic_shrinking = true,
};

static int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv)
{
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
        int err;

        err = rhashtable_init(&neigh_update->neigh_ht, &mlx5e_neigh_ht_params);
        if (err)
                return err;

        INIT_LIST_HEAD(&neigh_update->neigh_list);
        spin_lock_init(&neigh_update->encap_lock);
        INIT_DELAYED_WORK(&neigh_update->neigh_stats_work,
                          mlx5e_rep_neigh_stats_work);
        mlx5e_rep_neigh_update_init_interval(rpriv);

        rpriv->neigh_update.netevent_nb.notifier_call = mlx5e_rep_netevent_event;
        err = register_netevent_notifier(&rpriv->neigh_update.netevent_nb);
        if (err)
                goto out_err;
        return 0;

out_err:
        rhashtable_destroy(&neigh_update->neigh_ht);
        return err;
}

static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
{
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
        struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);

        unregister_netevent_notifier(&neigh_update->netevent_nb);

        flush_workqueue(priv->wq); /* flush neigh update works */

        cancel_delayed_work_sync(&rpriv->neigh_update.neigh_stats_work);

        rhashtable_destroy(&neigh_update->neigh_ht);
}

static int mlx5e_rep_neigh_entry_insert(struct mlx5e_priv *priv,
                                        struct mlx5e_neigh_hash_entry *nhe)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        int err;

        err = rhashtable_insert_fast(&rpriv->neigh_update.neigh_ht,
                                     &nhe->rhash_node,
                                     mlx5e_neigh_ht_params);
        if (err)
                return err;

        list_add(&nhe->neigh_list, &rpriv->neigh_update.neigh_list);

        return err;
}

static void mlx5e_rep_neigh_entry_remove(struct mlx5e_priv *priv,
                                         struct mlx5e_neigh_hash_entry *nhe)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;

        spin_lock_bh(&rpriv->neigh_update.encap_lock);

        list_del(&nhe->neigh_list);

        rhashtable_remove_fast(&rpriv->neigh_update.neigh_ht,
                               &nhe->rhash_node,
                               mlx5e_neigh_ht_params);
        spin_unlock_bh(&rpriv->neigh_update.encap_lock);
}

/* Callers must serialize against concurrent hash table updates: hold
 * either the RTNL lock or, in atomic context, the encap spin lock.
 */
static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
                             struct mlx5e_neigh *m_neigh)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;

        return rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh,
                                      mlx5e_neigh_ht_params);
}

static int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
                                        struct mlx5e_encap_entry *e,
                                        struct mlx5e_neigh_hash_entry **nhe)
{
        int err;

        *nhe = kzalloc(sizeof(**nhe), GFP_KERNEL);
        if (!*nhe)
                return -ENOMEM;

        memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh));
        INIT_WORK(&(*nhe)->neigh_update_work, mlx5e_rep_neigh_update);
        INIT_LIST_HEAD(&(*nhe)->encap_list);
        refcount_set(&(*nhe)->refcnt, 1);

        err = mlx5e_rep_neigh_entry_insert(priv, *nhe);
        if (err)
                goto out_free;
        return 0;

out_free:
        kfree(*nhe);
        return err;
}

static void mlx5e_rep_neigh_entry_destroy(struct mlx5e_priv *priv,
                                          struct mlx5e_neigh_hash_entry *nhe)
{
        /* The entry must be removed from the hash table first, so a
         * concurrent netevent can no longer look it up and queue new work
         * against it; only then may the initial reference be dropped.
         * Any in-flight update work still holds its own reference, so the
         * entry is actually freed only after the last release.
         */
        mlx5e_rep_neigh_entry_remove(priv, nhe);
        mlx5e_rep_neigh_entry_release(nhe);
}

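/* Encap entries that resolve to the same neighbour share one hash entry:
 * the first attach creates it, the last detach destroys it.
 */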
int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
                                 struct mlx5e_encap_entry *e)
{
        struct mlx5e_neigh_hash_entry *nhe;
        int err;

        nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
        if (!nhe) {
                err = mlx5e_rep_neigh_entry_create(priv, e, &nhe);
                if (err)
                        return err;
        }
        list_add(&e->encap_list, &nhe->encap_list);
        return 0;
}

void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
                                  struct mlx5e_encap_entry *e)
{
        struct mlx5e_neigh_hash_entry *nhe;

        list_del(&e->encap_list);
        nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);

        if (list_empty(&nhe->encap_list))
                mlx5e_rep_neigh_entry_destroy(priv, nhe);
}

static int mlx5e_rep_open(struct net_device *dev)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        int err;

        mutex_lock(&priv->state_lock);
        err = mlx5e_open_locked(dev);
        if (err)
                goto unlock;

        if (!mlx5_modify_vport_admin_state(priv->mdev,
                                           MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
                                           rep->vport, MLX5_VPORT_ADMIN_STATE_UP))
                netif_carrier_on(dev);

unlock:
        mutex_unlock(&priv->state_lock);
        return err;
}

static int mlx5e_rep_close(struct net_device *dev)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        int ret;

        mutex_lock(&priv->state_lock);
        mlx5_modify_vport_admin_state(priv->mdev,
                                      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
                                      rep->vport, MLX5_VPORT_ADMIN_STATE_DOWN);
        ret = mlx5e_close_locked(dev);
        mutex_unlock(&priv->state_lock);
        return ret;
}

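/* The representor for vport N is exposed to userspace under physical port
 * name N-1, i.e. the zero-based VF number (vport 0 being the PF itself).
 */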
static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
                                        char *buf, size_t len)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        int ret;

        ret = snprintf(buf, len, "%d", rep->vport - 1);
        if (ret >= len)
                return -EOPNOTSUPP;

        return 0;
}

static int
mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
                              struct tc_cls_flower_offload *cls_flower, int flags)
{
        switch (cls_flower->command) {
        case TC_CLSFLOWER_REPLACE:
                return mlx5e_configure_flower(priv, cls_flower, flags);
        case TC_CLSFLOWER_DESTROY:
                return mlx5e_delete_flower(priv, cls_flower, flags);
        case TC_CLSFLOWER_STATS:
                return mlx5e_stats_flower(priv, cls_flower, flags);
        default:
                return -EOPNOTSUPP;
        }
}

static int mlx5e_rep_setup_tc_cb_egdev(enum tc_setup_type type, void *type_data,
                                       void *cb_priv)
{
        struct mlx5e_priv *priv = cb_priv;

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_EGRESS);
        default:
                return -EOPNOTSUPP;
        }
}

static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
                                 void *cb_priv)
{
        struct mlx5e_priv *priv = cb_priv;

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS);
        default:
                return -EOPNOTSUPP;
        }
}

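/* TC offload entry points: flower rules bound on the representor's ingress
 * block arrive via mlx5e_rep_setup_tc_cb, while mlx5e_rep_setup_tc_cb_egdev
 * is invoked for rules whose egress device is this representor (it is
 * registered in mlx5e_vport_rep_load() with the uplink representor's priv
 * as cb_priv).
 */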
static int mlx5e_rep_setup_tc_block(struct net_device *dev,
                                    struct tc_block_offload *f)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                return -EOPNOTSUPP;

        switch (f->command) {
        case TC_BLOCK_BIND:
                return tcf_block_cb_register(f->block, mlx5e_rep_setup_tc_cb,
                                             priv, priv, f->extack);
        case TC_BLOCK_UNBIND:
                tcf_block_cb_unregister(f->block, mlx5e_rep_setup_tc_cb, priv);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
                              void *type_data)
{
        switch (type) {
        case TC_SETUP_BLOCK:
                return mlx5e_rep_setup_tc_block(dev, type_data);
        default:
                return -EOPNOTSUPP;
        }
}

bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep;

        if (!MLX5_ESWITCH_MANAGER(priv->mdev))
                return false;

        rep = rpriv->rep;
        if (esw->mode == SRIOV_OFFLOADS &&
            rep && rep->vport == FDB_UPLINK_VPORT)
                return true;

        return false;
}

static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep;

        if (!MLX5_ESWITCH_MANAGER(priv->mdev))
                return false;

        rep = rpriv->rep;
        if (rep && rep->vport != FDB_UPLINK_VPORT)
                return true;

        return false;
}

bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        switch (attr_id) {
        case IFLA_OFFLOAD_XSTATS_CPU_HIT:
                if (mlx5e_is_vf_vport_rep(priv) || mlx5e_is_uplink_rep(priv))
                        return true;
        }

        return false;
}

static int
mlx5e_get_sw_stats64(const struct net_device *dev,
                     struct rtnl_link_stats64 *stats)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_sw_stats *sstats = &priv->stats.sw;

        mlx5e_rep_update_sw_counters(priv);

        stats->rx_packets = sstats->rx_packets;
        stats->rx_bytes   = sstats->rx_bytes;
        stats->tx_packets = sstats->tx_packets;
        stats->tx_bytes   = sstats->tx_bytes;

        stats->tx_dropped = sstats->tx_queue_dropped;

        return 0;
}

int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
                            void *sp)
{
        switch (attr_id) {
        case IFLA_OFFLOAD_XSTATS_CPU_HIT:
                return mlx5e_get_sw_stats64(dev, sp);
        }

        return -EINVAL;
}

static void
mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        /* update HW stats in background for next time */
        mlx5e_queue_update_stats(priv);
        memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
}

static const struct switchdev_ops mlx5e_rep_switchdev_ops = {
        .switchdev_port_attr_get = mlx5e_attr_get,
};

static int mlx5e_change_rep_mtu(struct net_device *netdev, int new_mtu)
{
        return mlx5e_change_mtu(netdev, new_mtu, NULL);
}

static const struct net_device_ops mlx5e_netdev_ops_rep = {
        .ndo_open               = mlx5e_rep_open,
        .ndo_stop               = mlx5e_rep_close,
        .ndo_start_xmit         = mlx5e_xmit,
        .ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name,
        .ndo_setup_tc           = mlx5e_rep_setup_tc,
        .ndo_get_stats64        = mlx5e_rep_get_stats,
        .ndo_has_offload_stats  = mlx5e_has_offload_stats,
        .ndo_get_offload_stats  = mlx5e_get_offload_stats,
        .ndo_change_mtu         = mlx5e_change_rep_mtu,
};

static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
                                   struct mlx5e_params *params, u16 mtu)
{
        u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
                                         MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
                                         MLX5_CQ_PERIOD_MODE_START_FROM_EQE;

        params->hard_mtu    = MLX5E_ETH_HARD_MTU;
        params->sw_mtu      = mtu;
        params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE;

        /* RQ */
        mlx5e_build_rq_params(mdev, params);

        /* CQ moderation params */
        params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
        mlx5e_set_rx_cq_mode_params(params, cq_period_mode);

        params->num_tc = 1;

        mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);

        /* RSS */
        mlx5e_build_rss_params(params);
}

static void mlx5e_build_rep_netdev(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
        u16 max_mtu;

        netdev->netdev_ops = &mlx5e_netdev_ops_rep;

        netdev->watchdog_timeo = 15 * HZ;

        netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;

        netdev->switchdev_ops = &mlx5e_rep_switchdev_ops;

        netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
        netdev->hw_features |= NETIF_F_HW_TC;

        netdev->hw_features |= NETIF_F_SG;
        netdev->hw_features |= NETIF_F_IP_CSUM;
        netdev->hw_features |= NETIF_F_IPV6_CSUM;
        netdev->hw_features |= NETIF_F_GRO;
        netdev->hw_features |= NETIF_F_TSO;
        netdev->hw_features |= NETIF_F_TSO6;
        netdev->hw_features |= NETIF_F_RXCSUM;

        netdev->features |= netdev->hw_features;

        eth_hw_addr_random(netdev);

        netdev->min_mtu = ETH_MIN_MTU;
        mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
        netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
}

static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
                          struct net_device *netdev,
                          const struct mlx5e_profile *profile,
                          void *ppriv)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int err;

        err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
        if (err)
                return err;

        priv->channels.params.num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;

        mlx5e_build_rep_params(mdev, &priv->channels.params, netdev->mtu);
        mlx5e_build_rep_netdev(netdev);

        mlx5e_timestamp_init(priv);

        return 0;
}

static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
        mlx5e_netdev_cleanup(priv->netdev, priv);
}

static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
{
        struct ttc_params ttc_params = {};
        int tt, err;

        priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
                                              MLX5_FLOW_NAMESPACE_KERNEL);

        /* The inner_ttc in the ttc params is intentionally not set */
        ttc_params.any_tt_tirn = priv->direct_tir[0].tirn;
        mlx5e_set_ttc_ft_params(&ttc_params);
        for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
                ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;

        err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
        if (err) {
                netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n", err);
                return err;
        }
        return 0;
}

static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5_flow_handle *flow_rule;
        struct mlx5_flow_destination dest;

        dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
        dest.tir_num = priv->direct_tir[0].tirn;
        flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
                                                      rep->vport,
                                                      &dest);
        if (IS_ERR(flow_rule))
                return PTR_ERR(flow_rule);
        rpriv->vport_rx_rule = flow_rule;
        return 0;
}

static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        int err;

        mlx5e_init_l2_addr(priv);

        err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
        if (err) {
                mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
                return err;
        }

        err = mlx5e_create_indirect_rqt(priv);
        if (err)
                goto err_close_drop_rq;

        err = mlx5e_create_direct_rqts(priv);
        if (err)
                goto err_destroy_indirect_rqts;

        err = mlx5e_create_indirect_tirs(priv, false);
        if (err)
                goto err_destroy_direct_rqts;

        err = mlx5e_create_direct_tirs(priv);
        if (err)
                goto err_destroy_indirect_tirs;

        err = mlx5e_create_rep_ttc_table(priv);
        if (err)
                goto err_destroy_direct_tirs;

        err = mlx5e_create_rep_vport_rx_rule(priv);
        if (err)
                goto err_destroy_ttc_table;

        return 0;

err_destroy_ttc_table:
        mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
err_destroy_direct_tirs:
        mlx5e_destroy_direct_tirs(priv);
err_destroy_indirect_tirs:
        mlx5e_destroy_indirect_tirs(priv, false);
err_destroy_direct_rqts:
        mlx5e_destroy_direct_rqts(priv);
err_destroy_indirect_rqts:
        mlx5e_destroy_rqt(priv, &priv->indir_rqt);
err_close_drop_rq:
        mlx5e_close_drop_rq(&priv->drop_rq);
        return err;
}

static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;

        mlx5_del_flow_rules(rpriv->vport_rx_rule);
        mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
        mlx5e_destroy_direct_tirs(priv);
        mlx5e_destroy_indirect_tirs(priv, false);
        mlx5e_destroy_direct_rqts(priv);
        mlx5e_destroy_rqt(priv, &priv->indir_rqt);
        mlx5e_close_drop_rq(&priv->drop_rq);
}

static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
        int err;

        err = mlx5e_create_tises(priv);
        if (err) {
                mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
                return err;
        }
        return 0;
}

static const struct mlx5e_profile mlx5e_rep_profile = {
        .init                   = mlx5e_init_rep,
        .cleanup                = mlx5e_cleanup_rep,
        .init_rx                = mlx5e_init_rep_rx,
        .cleanup_rx             = mlx5e_cleanup_rep_rx,
        .init_tx                = mlx5e_init_rep_tx,
        .cleanup_tx             = mlx5e_cleanup_nic_tx,
        .update_stats           = mlx5e_rep_update_hw_counters,
        .update_carrier         = NULL,
        .rx_handlers.handle_rx_cqe       = mlx5e_handle_rx_cqe_rep,
        .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
        .max_tc                 = 1,
};

/* e-Switch vport representors */

static int
mlx5e_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
        struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
        int err;

        if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
                err = mlx5e_add_sqs_fwd_rules(priv);
                if (err)
                        return err;
        }

        err = mlx5e_rep_neigh_init(rpriv);
        if (err)
                goto err_remove_sqs;

        /* init shared tc flow table */
        err = mlx5e_tc_esw_init(&rpriv->tc_ht);
        if (err)
                goto err_neigh_cleanup;

        return 0;

err_neigh_cleanup:
        mlx5e_rep_neigh_cleanup(rpriv);
err_remove_sqs:
        mlx5e_remove_sqs_fwd_rules(priv);
        return err;
}

static void
mlx5e_nic_rep_unload(struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
        struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);

        if (test_bit(MLX5E_STATE_OPENED, &priv->state))
                mlx5e_remove_sqs_fwd_rules(priv);

        /* clean uplink offloaded TC rules, delete shared tc flow table */
        mlx5e_tc_esw_cleanup(&rpriv->tc_ht);

        mlx5e_rep_neigh_cleanup(rpriv);
}

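/* Load/unload for VF vport representors: each gets its own netdev built
 * from mlx5e_rep_profile, attached to the hardware, registered as an
 * egress-device TC callback against the uplink, and finally registered
 * with the kernel. Unload mirrors this order exactly in reverse.
 */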
static int
mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_rep_priv *uplink_rpriv;
        struct mlx5e_rep_priv *rpriv;
        struct net_device *netdev;
        struct mlx5e_priv *upriv;
        int nch, err;

        rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
        if (!rpriv)
                return -ENOMEM;

        nch = mlx5e_get_max_num_channels(dev);
        netdev = mlx5e_create_netdev(dev, &mlx5e_rep_profile, nch, rpriv);
        if (!netdev) {
                pr_warn("Failed to create representor netdev for vport %d\n",
                        rep->vport);
                kfree(rpriv);
                return -EINVAL;
        }

        rpriv->netdev = netdev;
        rpriv->rep = rep;
        rep->rep_if[REP_ETH].priv = rpriv;
        INIT_LIST_HEAD(&rpriv->vport_sqs_list);

        err = mlx5e_attach_netdev(netdev_priv(netdev));
        if (err) {
                pr_warn("Failed to attach representor netdev for vport %d\n",
                        rep->vport);
                goto err_destroy_netdev;
        }

        err = mlx5e_rep_neigh_init(rpriv);
        if (err) {
                pr_warn("Failed to initialize neighbours handling for vport %d\n",
                        rep->vport);
                goto err_detach_netdev;
        }

        uplink_rpriv = mlx5_eswitch_get_uplink_priv(dev->priv.eswitch, REP_ETH);
        upriv = netdev_priv(uplink_rpriv->netdev);
        err = tc_setup_cb_egdev_register(netdev, mlx5e_rep_setup_tc_cb_egdev,
                                         upriv);
        if (err)
                goto err_neigh_cleanup;

        err = register_netdev(netdev);
        if (err) {
                pr_warn("Failed to register representor netdev for vport %d\n",
                        rep->vport);
                goto err_egdev_cleanup;
        }

        return 0;

err_egdev_cleanup:
        tc_setup_cb_egdev_unregister(netdev, mlx5e_rep_setup_tc_cb_egdev,
                                     upriv);

err_neigh_cleanup:
        mlx5e_rep_neigh_cleanup(rpriv);

err_detach_netdev:
        mlx5e_detach_netdev(netdev_priv(netdev));

err_destroy_netdev:
        mlx5e_destroy_netdev(netdev_priv(netdev));
        kfree(rpriv);
        return err;
}

static void
mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
        struct net_device *netdev = rpriv->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_rep_priv *uplink_rpriv;
        void *ppriv = priv->ppriv;
        struct mlx5e_priv *upriv;

        unregister_netdev(netdev);
        uplink_rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch,
                                                    REP_ETH);
        upriv = netdev_priv(uplink_rpriv->netdev);
        tc_setup_cb_egdev_unregister(netdev, mlx5e_rep_setup_tc_cb_egdev,
                                     upriv);
        mlx5e_rep_neigh_cleanup(rpriv);
        mlx5e_detach_netdev(priv);
        mlx5e_destroy_netdev(priv);
        kfree(ppriv);
}

static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_rep_priv *rpriv;

        rpriv = mlx5e_rep_to_rep_priv(rep);

        return rpriv->netdev;
}

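/* Vport 0 is the PF/uplink representor and is registered separately in
 * mlx5e_register_vport_reps(); vports 1..total-1 are the VF representors
 * handled by the two helpers below.
 */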
static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_eswitch *esw = mdev->priv.eswitch;
        int total_vfs = MLX5_TOTAL_VPORTS(mdev);
        int vport;

        for (vport = 1; vport < total_vfs; vport++) {
                struct mlx5_eswitch_rep_if rep_if = {};

                rep_if.load = mlx5e_vport_rep_load;
                rep_if.unload = mlx5e_vport_rep_unload;
                rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev;
                mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_ETH);
        }
}

static void mlx5e_rep_unregister_vf_vports(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_eswitch *esw = mdev->priv.eswitch;
        int total_vfs = MLX5_TOTAL_VPORTS(mdev);
        int vport;

        for (vport = 1; vport < total_vfs; vport++)
                mlx5_eswitch_unregister_vport_rep(esw, vport, REP_ETH);
}

void mlx5e_register_vport_reps(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_eswitch *esw = mdev->priv.eswitch;
        struct mlx5_eswitch_rep_if rep_if;
        struct mlx5e_rep_priv *rpriv;

        rpriv = priv->ppriv;
        rpriv->netdev = priv->netdev;

        rep_if.load = mlx5e_nic_rep_load;
        rep_if.unload = mlx5e_nic_rep_unload;
        rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev;
        rep_if.priv = rpriv;
        INIT_LIST_HEAD(&rpriv->vport_sqs_list);
        mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_ETH); /* UPLINK PF vport */

        mlx5e_rep_register_vf_vports(priv); /* VFs vports */
}

void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_eswitch *esw = mdev->priv.eswitch;

        mlx5e_rep_unregister_vf_vports(priv); /* VFs vports */
        mlx5_eswitch_unregister_vport_rep(esw, 0, REP_ETH); /* UPLINK PF */
}

void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev)
{
        struct mlx5_eswitch *esw = mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv;

        rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
        if (!rpriv)
                return NULL;

        rpriv->rep = &esw->offloads.vport_reps[0];
        return rpriv;
}
1458