1
2
3
4#include "ice.h"
5#include "ice_lib.h"
6#include "ice_eswitch.h"
7#include "ice_fltr.h"
8#include "ice_repr.h"
9#include "ice_devlink.h"
10#include "ice_tc_lib.h"
11
12
13
14
15
16
17
18
/**
 * ice_eswitch_setup_env - configure switchdev HW filters
 * @pf: pointer to PF struct
 *
 * Reconfigure the uplink and control VSIs so traffic flows through the
 * switchdev slow path: strip legacy filters from the uplink, make the
 * uplink the default Rx VSI and the control VSI the default Tx VSI,
 * allow security override on both, and enable loopback on the default
 * Tx rule. Failures unwind in reverse order via the goto chain.
 *
 * Return: 0 on success, -ENODEV on any failure.
 */
static int ice_eswitch_setup_env(struct ice_pf *pf)
{
	struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
	struct net_device *uplink_netdev = uplink_vsi->netdev;
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	struct ice_port_info *pi = pf->hw.port_info;
	bool rule_added = false;

	ice_vsi_manage_vlan_stripping(ctrl_vsi, false);

	/* drop all existing filters on the uplink VSI */
	ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);

	/* forget synced unicast/multicast addresses under the addr lock */
	netif_addr_lock_bh(uplink_netdev);
	__dev_uc_unsync(uplink_netdev, NULL);
	__dev_mc_unsync(uplink_netdev, NULL);
	netif_addr_unlock_bh(uplink_netdev);

	if (ice_vsi_add_vlan(uplink_vsi, 0, ICE_FWD_TO_VSI))
		goto err_def_rx;

	/* only claim the default Rx VSI slot if nobody holds it yet;
	 * rule_added tracks whether we must undo this on error
	 */
	if (!ice_is_dflt_vsi_in_use(uplink_vsi->vsw)) {
		if (ice_set_dflt_vsi(uplink_vsi->vsw, uplink_vsi))
			goto err_def_rx;
		rule_added = true;
	}

	if (ice_cfg_dflt_vsi(pi->hw, ctrl_vsi->idx, true, ICE_FLTR_TX))
		goto err_def_tx;

	if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
		goto err_override_uplink;

	if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override))
		goto err_override_control;

	/* enable loopback so representor Tx can reach VF VSIs */
	if (ice_fltr_update_flags_dflt_rule(ctrl_vsi, pi->dflt_tx_vsi_rule_id,
					    ICE_FLTR_TX,
					    ICE_SINGLE_ACT_LB_ENABLE))
		goto err_update_action;

	return 0;

err_update_action:
	ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
err_override_control:
	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
err_override_uplink:
	ice_cfg_dflt_vsi(pi->hw, ctrl_vsi->idx, false, ICE_FLTR_TX);
err_def_tx:
	if (rule_added)
		ice_clear_dflt_vsi(uplink_vsi->vsw);
err_def_rx:
	/* restore legacy MAC/broadcast filtering on the uplink */
	ice_fltr_add_mac_and_broadcast(uplink_vsi,
				       uplink_vsi->port_info->mac.perm_addr,
				       ICE_FWD_TO_VSI);
	return -ENODEV;
}
76
77
78
79
80
81
82
83
84
85
86
87
88static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
89{
90 struct ice_vsi *vsi = pf->switchdev.control_vsi;
91 int q_id;
92
93 ice_for_each_txq(vsi, q_id) {
94 struct ice_repr *repr = pf->vf[q_id].repr;
95 struct ice_q_vector *q_vector = repr->q_vector;
96 struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];
97 struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];
98
99 q_vector->vsi = vsi;
100 q_vector->reg_idx = vsi->q_vectors[0]->reg_idx;
101
102 q_vector->num_ring_tx = 1;
103 q_vector->tx.tx_ring = tx_ring;
104 tx_ring->q_vector = q_vector;
105 tx_ring->next = NULL;
106 tx_ring->netdev = repr->netdev;
107
108
109
110 tx_ring->q_index = 0;
111
112 q_vector->num_ring_rx = 1;
113 q_vector->rx.rx_ring = rx_ring;
114 rx_ring->q_vector = q_vector;
115 rx_ring->next = NULL;
116 rx_ring->netdev = repr->netdev;
117 }
118}
119
120
121
122
123
/**
 * ice_eswitch_setup_reprs - configure port representors for switchdev
 * @pf: pointer to PF struct
 *
 * For each VF: remove the VF VSI's filters, attach a HW_PORT_MUX metadata
 * dst, clear antispoof, add a VLAN-0 forwarding rule, and register NAPI on
 * the representor. Then (re)allocate the control VSI's vsi_num -> netdev
 * lookup table and bind every representor to the control VSI. On failure,
 * VFs already converted are rolled back to legacy filtering.
 *
 * Return: 0 on success, -ENODEV on failure.
 */
static int ice_eswitch_setup_reprs(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	int max_vsi_num = 0;
	int i;

	ice_for_each_vf(pf, i) {
		struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
		struct ice_vf *vf = &pf->vf[i];

		ice_remove_vsi_fltr(&pf->hw, vsi->idx);
		vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
						   GFP_KERNEL);
		if (!vf->repr->dst) {
			/* restore legacy filtering before bailing */
			ice_fltr_add_mac_and_broadcast(vsi,
						       vf->hw_lan_addr.addr,
						       ICE_FWD_TO_VSI);
			goto err;
		}

		if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) {
			ice_fltr_add_mac_and_broadcast(vsi,
						       vf->hw_lan_addr.addr,
						       ICE_FWD_TO_VSI);
			metadata_dst_free(vf->repr->dst);
			goto err;
		}

		if (ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI)) {
			ice_fltr_add_mac_and_broadcast(vsi,
						       vf->hw_lan_addr.addr,
						       ICE_FWD_TO_VSI);
			metadata_dst_free(vf->repr->dst);
			ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
			goto err;
		}

		/* track the largest vsi_num to size the lookup table below */
		if (max_vsi_num < vsi->vsi_num)
			max_vsi_num = vsi->vsi_num;

		netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi, ice_napi_poll,
			       NAPI_POLL_WEIGHT);

		netif_keep_dst(vf->repr->netdev);
	}

	/* free a table possibly left over from a previous setup/rebuild */
	kfree(ctrl_vsi->target_netdevs);

	ctrl_vsi->target_netdevs = kcalloc(max_vsi_num + 1,
					   sizeof(*ctrl_vsi->target_netdevs),
					   GFP_KERNEL);
	if (!ctrl_vsi->target_netdevs)
		goto err;

	ice_for_each_vf(pf, i) {
		struct ice_repr *repr = pf->vf[i].repr;
		struct ice_vsi *vsi = repr->src_vsi;
		struct metadata_dst *dst;

		ctrl_vsi->target_netdevs[vsi->vsi_num] = repr->netdev;

		/* stamp the dst so Tx can be steered to this VF's VSI */
		dst = repr->dst;
		dst->u.port_info.port_id = vsi->vsi_num;
		dst->u.port_info.lower_dev = repr->netdev;
		ice_repr_set_traffic_vsi(repr, ctrl_vsi);
	}

	return 0;

err:
	/* unwind VFs [0, i) that were fully configured above.
	 * NOTE(review): netif_napi_add() done in the loop is not undone
	 * here — looks like a leak on this error path; confirm against
	 * ice_eswitch_napi_del()/release_reprs() usage.
	 */
	for (i = i - 1; i >= 0; i--) {
		struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
		struct ice_vf *vf = &pf->vf[i];

		ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
		metadata_dst_free(vf->repr->dst);
		ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
					       ICE_FWD_TO_VSI);
	}

	return -ENODEV;
}
206
207
208
209
210
211
212static void
213ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
214{
215 int i;
216
217 kfree(ctrl_vsi->target_netdevs);
218 ice_for_each_vf(pf, i) {
219 struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
220 struct ice_vf *vf = &pf->vf[i];
221
222 ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
223 metadata_dst_free(vf->repr->dst);
224 ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
225 ICE_FWD_TO_VSI);
226
227 netif_napi_del(&vf->repr->q_vector->napi);
228 }
229}
230
231
232
233
234
235void ice_eswitch_update_repr(struct ice_vsi *vsi)
236{
237 struct ice_pf *pf = vsi->back;
238 struct ice_repr *repr;
239 struct ice_vf *vf;
240 int ret;
241
242 if (!ice_is_switchdev_running(pf))
243 return;
244
245 vf = &pf->vf[vsi->vf_id];
246 repr = vf->repr;
247 repr->src_vsi = vsi;
248 repr->dst->u.port_info.port_id = vsi->vsi_num;
249
250 ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
251 if (ret) {
252 ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, ICE_FWD_TO_VSI);
253 dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor", vsi->vf_id);
254 }
255}
256
257
258
259
260
261
262
263
264netdev_tx_t
265ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
266{
267 struct ice_netdev_priv *np;
268 struct ice_repr *repr;
269 struct ice_vsi *vsi;
270
271 np = netdev_priv(netdev);
272 vsi = np->vsi;
273
274 if (ice_is_reset_in_progress(vsi->back->state))
275 return NETDEV_TX_BUSY;
276
277 repr = ice_netdev_to_repr(netdev);
278 skb_dst_drop(skb);
279 dst_hold((struct dst_entry *)repr->dst);
280 skb_dst_set(skb, (struct dst_entry *)repr->dst);
281 skb->queue_mapping = repr->vf->vf_id;
282
283 return ice_start_xmit(skb, netdev);
284}
285
286
287
288
289
290
291void
292ice_eswitch_set_target_vsi(struct sk_buff *skb,
293 struct ice_tx_offload_params *off)
294{
295 struct metadata_dst *dst = skb_metadata_dst(skb);
296 u64 cd_cmd, dst_vsi;
297
298 if (!dst) {
299 cd_cmd = ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S;
300 off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX);
301 } else {
302 cd_cmd = ICE_TX_CTX_DESC_SWTCH_VSI << ICE_TXD_CTX_QW1_CMD_S;
303 dst_vsi = ((u64)dst->u.port_info.port_id <<
304 ICE_TXD_CTX_QW1_VSI_S) & ICE_TXD_CTX_QW1_VSI_M;
305 off->cd_qw1 = cd_cmd | dst_vsi | ICE_TX_DESC_DTYPE_CTX;
306 }
307}
308
309
310
311
312
313
314
315
316static void ice_eswitch_release_env(struct ice_pf *pf)
317{
318 struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
319 struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
320
321 ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
322 ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
323 ice_cfg_dflt_vsi(&pf->hw, ctrl_vsi->idx, false, ICE_FLTR_TX);
324 ice_clear_dflt_vsi(uplink_vsi->vsw);
325 ice_fltr_add_mac_and_broadcast(uplink_vsi,
326 uplink_vsi->port_info->mac.perm_addr,
327 ICE_FWD_TO_VSI);
328}
329
330
331
332
333
334
335static struct ice_vsi *
336ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
337{
338 return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, ICE_INVAL_VFID, NULL);
339}
340
341
342
343
344
345static void ice_eswitch_napi_del(struct ice_pf *pf)
346{
347 int i;
348
349 ice_for_each_vf(pf, i)
350 netif_napi_del(&pf->vf[i].repr->q_vector->napi);
351}
352
353
354
355
356
357static void ice_eswitch_napi_enable(struct ice_pf *pf)
358{
359 int i;
360
361 ice_for_each_vf(pf, i)
362 napi_enable(&pf->vf[i].repr->q_vector->napi);
363}
364
365
366
367
368
369static void ice_eswitch_napi_disable(struct ice_pf *pf)
370{
371 int i;
372
373 ice_for_each_vf(pf, i)
374 napi_disable(&pf->vf[i].repr->q_vector->napi);
375}
376
377
378
379
380
381
382static void ice_eswitch_set_rxdid(struct ice_vsi *vsi, u32 rxdid)
383{
384 struct ice_hw *hw = &vsi->back->hw;
385 int i;
386
387 ice_for_each_rxq(vsi, i) {
388 struct ice_rx_ring *ring = vsi->rx_rings[i];
389 u16 pf_q = vsi->rxq_map[ring->q_index];
390
391 ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
392 }
393}
394
395
396
397
398
/**
 * ice_eswitch_enable_switchdev - bring up the switchdev slow path
 * @pf: pointer to PF structure
 *
 * Create the control VSI, configure the switchdev environment, create
 * and wire up port representors, open the control VSI and enable NAPI.
 * Each err_* label unwinds everything set up before the failing step.
 *
 * Return: 0 on success, -ENODEV on failure.
 */
static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi;

	pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info);
	if (!pf->switchdev.control_vsi)
		return -ENODEV;

	ctrl_vsi = pf->switchdev.control_vsi;
	pf->switchdev.uplink_vsi = ice_get_main_vsi(pf);
	if (!pf->switchdev.uplink_vsi)
		goto err_vsi;

	if (ice_eswitch_setup_env(pf))
		goto err_vsi;

	if (ice_repr_add_for_all_vfs(pf))
		goto err_repr_add;

	if (ice_eswitch_setup_reprs(pf))
		goto err_setup_reprs;

	ice_eswitch_remap_rings_to_vectors(pf);

	if (ice_vsi_open(ctrl_vsi))
		goto err_setup_reprs;

	ice_eswitch_napi_enable(pf);

	/* control VSI Rx needs the NIC-2 flex descriptor to carry src_vsi */
	ice_eswitch_set_rxdid(ctrl_vsi, ICE_RXDID_FLEX_NIC_2);

	return 0;

err_setup_reprs:
	ice_repr_rem_from_all_vfs(pf);
err_repr_add:
	ice_eswitch_release_env(pf);
err_vsi:
	ice_vsi_release(ctrl_vsi);
	return -ENODEV;
}
440
441
442
443
444
445static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
446{
447 struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
448
449 ice_eswitch_napi_disable(pf);
450 ice_eswitch_release_env(pf);
451 ice_eswitch_release_reprs(pf, ctrl_vsi);
452 ice_vsi_release(ctrl_vsi);
453 ice_repr_rem_from_all_vfs(pf);
454}
455
456
457
458
459
460
461
462int
463ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
464 struct netlink_ext_ack *extack)
465{
466 struct ice_pf *pf = devlink_priv(devlink);
467
468 if (pf->eswitch_mode == mode)
469 return 0;
470
471 if (pf->num_alloc_vfs) {
472 dev_info(ice_pf_to_dev(pf), "Changing eswitch mode is allowed only if there is no VFs created");
473 NL_SET_ERR_MSG_MOD(extack, "Changing eswitch mode is allowed only if there is no VFs created");
474 return -EOPNOTSUPP;
475 }
476
477 switch (mode) {
478 case DEVLINK_ESWITCH_MODE_LEGACY:
479 dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to legacy",
480 pf->hw.pf_id);
481 NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to legacy");
482 break;
483 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
484 {
485 dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev",
486 pf->hw.pf_id);
487 NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev");
488 break;
489 }
490 default:
491 NL_SET_ERR_MSG_MOD(extack, "Unknown eswitch mode");
492 return -EINVAL;
493 }
494
495 pf->eswitch_mode = mode;
496 return 0;
497}
498
499
500
501
502
503
504
505
506
507
508struct net_device *
509ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring,
510 union ice_32b_rx_flex_desc *rx_desc)
511{
512 struct ice_32b_rx_flex_desc_nic_2 *desc;
513 struct ice_vsi *vsi = rx_ring->vsi;
514 struct ice_vsi *control_vsi;
515 u16 target_vsi_id;
516
517 control_vsi = vsi->back->switchdev.control_vsi;
518 if (vsi != control_vsi)
519 return rx_ring->netdev;
520
521 desc = (struct ice_32b_rx_flex_desc_nic_2 *)rx_desc;
522 target_vsi_id = le16_to_cpu(desc->src_vsi);
523
524 return vsi->target_netdevs[target_vsi_id];
525}
526
527
528
529
530
531
532int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode)
533{
534 struct ice_pf *pf = devlink_priv(devlink);
535
536 *mode = pf->eswitch_mode;
537 return 0;
538}
539
540
541
542
543
544
545
546
547bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
548{
549 return pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV;
550}
551
552
553
554
555
556void ice_eswitch_release(struct ice_pf *pf)
557{
558 if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
559 return;
560
561 ice_eswitch_disable_switchdev(pf);
562 pf->switchdev.is_running = false;
563}
564
565
566
567
568
569int ice_eswitch_configure(struct ice_pf *pf)
570{
571 int status;
572
573 if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY || pf->switchdev.is_running)
574 return 0;
575
576 status = ice_eswitch_enable_switchdev(pf);
577 if (status)
578 return status;
579
580 pf->switchdev.is_running = true;
581 return 0;
582}
583
584
585
586
587
588static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
589{
590 struct ice_repr *repr;
591 int i;
592
593 if (test_bit(ICE_DOWN, pf->state))
594 return;
595
596 ice_for_each_vf(pf, i) {
597 repr = pf->vf[i].repr;
598 if (repr)
599 ice_repr_start_tx_queues(repr);
600 }
601}
602
603
604
605
606
607void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
608{
609 struct ice_repr *repr;
610 int i;
611
612 if (test_bit(ICE_DOWN, pf->state))
613 return;
614
615 ice_for_each_vf(pf, i) {
616 repr = pf->vf[i].repr;
617 if (repr)
618 ice_repr_stop_tx_queues(repr);
619 }
620}
621
622
623
624
625
/**
 * ice_eswitch_rebuild - rebuild the switchdev setup after a reset
 * @pf: pointer to PF structure
 *
 * Quiesce NAPI, re-run the environment and representor setup, remap the
 * control VSI rings, replay TC filters, reopen the control VSI and
 * restart representor Tx. Step order mirrors the initial enable path.
 *
 * Return: 0 on success, first failing step's error code otherwise.
 */
int ice_eswitch_rebuild(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	int status;

	/* quiesce and drop stale NAPI state before re-setup */
	ice_eswitch_napi_disable(pf);
	ice_eswitch_napi_del(pf);

	status = ice_eswitch_setup_env(pf);
	if (status)
		return status;

	status = ice_eswitch_setup_reprs(pf);
	if (status)
		return status;

	ice_eswitch_remap_rings_to_vectors(pf);

	/* re-install TC filters lost across the reset */
	ice_replay_tc_fltrs(pf);

	status = ice_vsi_open(ctrl_vsi);
	if (status)
		return status;

	ice_eswitch_napi_enable(pf);
	ice_eswitch_set_rxdid(ctrl_vsi, ICE_RXDID_FLEX_NIC_2);
	ice_eswitch_start_all_tx_queues(pf);

	return 0;
}
656