// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"

/**
 * ice_validate_vf_id - helper to check if VF ID is valid
 * @pf: pointer to the PF structure
 * @vf_id: the ID of the VF to check
 */
14static int ice_validate_vf_id(struct ice_pf *pf, u16 vf_id)
15{
16
17 if (vf_id >= pf->num_alloc_vfs) {
18 dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %u\n", vf_id);
19 return -EINVAL;
20 }
21 return 0;
22}

/**
 * ice_check_vf_init - helper to check if VF init complete
 * @pf: pointer to the PF structure
 * @vf: the pointer to the VF to check
 */
29static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
30{
31 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
32 dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
33 vf->vf_id);
34 return -EBUSY;
35 }
36 return 0;
37}

/**
 * ice_err_to_virt_err - translate ice_status errors into virtchnl errors
 * @ice_err: error return code
 */
43static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
44{
45 switch (ice_err) {
46 case ICE_SUCCESS:
47 return VIRTCHNL_STATUS_SUCCESS;
48 case ICE_ERR_BAD_PTR:
49 case ICE_ERR_INVAL_SIZE:
50 case ICE_ERR_DEVICE_NOT_SUPPORTED:
51 case ICE_ERR_PARAM:
52 case ICE_ERR_CFG:
53 return VIRTCHNL_STATUS_ERR_PARAM;
54 case ICE_ERR_NO_MEMORY:
55 return VIRTCHNL_STATUS_ERR_NO_MEMORY;
56 case ICE_ERR_NOT_READY:
57 case ICE_ERR_RESET_FAILED:
58 case ICE_ERR_FW_API_VER:
59 case ICE_ERR_AQ_ERROR:
60 case ICE_ERR_AQ_TIMEOUT:
61 case ICE_ERR_AQ_FULL:
62 case ICE_ERR_AQ_NO_WORK:
63 case ICE_ERR_AQ_EMPTY:
64 return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
65 default:
66 return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
67 }
68}

/**
 * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 */
78static void
79ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
80 enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
81{
82 struct ice_hw *hw = &pf->hw;
83 unsigned int i;
84
85 ice_for_each_vf(pf, i) {
86 struct ice_vf *vf = &pf->vf[i];
87
88
89 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
90 !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
91 continue;
92
93
94
95
96 ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
97 msglen, NULL);
98 }
99}

/**
 * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
 * @vf: pointer to the VF structure
 * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
 * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
 * @link_up: whether or not to set the link up/down
 */
108static void
109ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
110 int ice_link_speed, bool link_up)
111{
112 if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
113 pfe->event_data.link_event_adv.link_status = link_up;
114
115 pfe->event_data.link_event_adv.link_speed =
116 ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
117 } else {
118 pfe->event_data.link_event.link_status = link_up;
119
120 pfe->event_data.link_event.link_speed =
121 (enum virtchnl_link_speed)
122 ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
123 }
124}
125
126
127
128
129
130
131
132
133static bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
134{
135 return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
136 !bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
137}

/**
 * ice_is_vf_link_up - check if the VF's link is up
 * @vf: VF to check if link is up
 */
143static bool ice_is_vf_link_up(struct ice_vf *vf)
144{
145 struct ice_pf *pf = vf->pf;
146
147 if (ice_check_vf_init(pf, vf))
148 return false;
149
150 if (ice_vf_has_no_qs_ena(vf))
151 return false;
152 else if (vf->link_forced)
153 return vf->link_up;
154 else
155 return pf->hw.port_info->phy.link_info.link_info &
156 ICE_AQ_LINK_UP;
157}

/**
 * ice_vc_notify_vf_link_state - Inform a VF of link status
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 */
165static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
166{
167 struct virtchnl_pf_event pfe = { 0 };
168 struct ice_hw *hw = &vf->pf->hw;
169
170 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
171 pfe.severity = PF_EVENT_SEVERITY_INFO;
172
173 if (ice_is_vf_link_up(vf))
174 ice_set_pfe_link(vf, &pfe,
175 hw->port_info->phy.link_info.link_speed, true);
176 else
177 ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);
178
179 ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
180 VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
181 sizeof(pfe), NULL);
182}
183
184
185
186
187
188static void ice_vf_invalidate_vsi(struct ice_vf *vf)
189{
190 vf->lan_vsi_idx = ICE_NO_VSI;
191 vf->lan_vsi_num = ICE_NO_VSI;
192}
193
194
195
196
197
198static void ice_vf_vsi_release(struct ice_vf *vf)
199{
200 ice_vsi_release(vf->pf->vsi[vf->lan_vsi_idx]);
201 ice_vf_invalidate_vsi(vf);
202}

/**
 * ice_free_vf_res - Free a VF's resources
 * @vf: pointer to the VF info
 */
208static void ice_free_vf_res(struct ice_vf *vf)
209{
210 struct ice_pf *pf = vf->pf;
211 int i, last_vector_idx;
212
213
214
215
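	/* Disable the VF's configuration API first so it can no longer be
	 * used while its resources are being torn down.
	 */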
216 clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
217
218
219 if (vf->lan_vsi_idx != ICE_NO_VSI) {
220 ice_vf_vsi_release(vf);
221 vf->num_mac = 0;
222 }
223
224 last_vector_idx = vf->first_vector_idx + pf->num_msix_per_vf - 1;
225
226
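	/* clear VF MDD event information */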
227 memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
228 memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
229
230
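	/* acknowledge any pending interrupts on each of the VF's vectors */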
231 for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
232 wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
233 ice_flush(&pf->hw);
234 }
235
236 clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
237 clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
238}
239
240
241
242
243
244static void ice_dis_vf_mappings(struct ice_vf *vf)
245{
246 struct ice_pf *pf = vf->pf;
247 struct ice_vsi *vsi;
248 struct device *dev;
249 int first, last, v;
250 struct ice_hw *hw;
251
252 hw = &pf->hw;
253 vsi = pf->vsi[vf->lan_vsi_idx];
254
255 dev = ice_pf_to_dev(pf);
256 wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
257 wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
258
259 first = vf->first_vector_idx;
260 last = first + pf->num_msix_per_vf - 1;
261 for (v = first; v <= last; v++) {
262 u32 reg;
263
264 reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
265 GLINT_VECT2FUNC_IS_PF_M) |
266 ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
267 GLINT_VECT2FUNC_PF_NUM_M));
268 wr32(hw, GLINT_VECT2FUNC(v), reg);
269 }
270
271 if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
272 wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
273 else
274 dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
275
276 if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
277 wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
278 else
279 dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
280}
281
282
283
284
285
286
287
288
289
290
291static int ice_sriov_free_msix_res(struct ice_pf *pf)
292{
293 struct ice_res_tracker *res;
294
295 if (!pf)
296 return -EINVAL;
297
298 res = pf->irq_tracker;
299 if (!res)
300 return -EINVAL;
301
302
303 WARN_ON(pf->sriov_base_vector < res->num_entries);
304
305 pf->sriov_base_vector = 0;
306
307 return 0;
308}
309
310
311
312
313
314void ice_set_vf_state_qs_dis(struct ice_vf *vf)
315{
316
317 bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
318 bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
319 clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
320}
321
322
323
324
325
326static void ice_dis_vf_qs(struct ice_vf *vf)
327{
328 struct ice_pf *pf = vf->pf;
329 struct ice_vsi *vsi;
330
331 vsi = pf->vsi[vf->lan_vsi_idx];
332
333 ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
334 ice_vsi_stop_all_rx_rings(vsi);
335 ice_set_vf_state_qs_dis(vf);
336}

/**
 * ice_free_vfs - Free all VF resources
 * @pf: pointer to the PF structure
 */
342void ice_free_vfs(struct ice_pf *pf)
343{
344 struct device *dev = ice_pf_to_dev(pf);
345 struct ice_hw *hw = &pf->hw;
346 unsigned int tmp, i;
347
348 if (!pf->vf)
349 return;
350
351 while (test_and_set_bit(__ICE_VF_DIS, pf->state))
352 usleep_range(1000, 2000);
353
354
355
356
357
358 if (!pci_vfs_assigned(pf->pdev))
359 pci_disable_sriov(pf->pdev);
360 else
361 dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
362
363
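	/* Stop Tx/Rx queues for any VFs that still have them enabled */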
364 ice_for_each_vf(pf, i)
365 if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
366 ice_dis_vf_qs(&pf->vf[i]);
367
368 tmp = pf->num_alloc_vfs;
369 pf->num_qps_per_vf = 0;
370 pf->num_alloc_vfs = 0;
371 for (i = 0; i < tmp; i++) {
372 if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
373
374 ice_dis_vf_mappings(&pf->vf[i]);
375 set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states);
376 ice_free_vf_res(&pf->vf[i]);
377 }
378 }
379
380 if (ice_sriov_free_msix_res(pf))
381 dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
382
383 devm_kfree(dev, pf->vf);
384 pf->vf = NULL;
385
386
387
388
389
390 if (!pci_vfs_assigned(pf->pdev)) {
391 unsigned int vf_id;
392
393
394
395
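		/* Acknowledge VFLR for all VFs. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */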
396 for (vf_id = 0; vf_id < tmp; vf_id++) {
397 u32 reg_idx, bit_idx;
398
399 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
400 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
401 wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
402 }
403 }
404 clear_bit(__ICE_VF_DIS, pf->state);
405 clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
406}

/**
 * ice_trigger_vf_reset - Reset a VF on HW
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 * @is_pfr: true if the reset was triggered due to a previous PFR
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 */
418static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
419{
420 struct ice_pf *pf = vf->pf;
421 u32 reg, reg_idx, bit_idx;
422 unsigned int vf_abs_id, i;
423 struct device *dev;
424 struct ice_hw *hw;
425
426 dev = ice_pf_to_dev(pf);
427 hw = &pf->hw;
428 vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
429
430
431 clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
432
433
434
435
436 clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
437
438
439
440
441
442
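	/* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it
	 * in the case of VFR/VFLR. If this is done for PFR, it can mess up
	 * VF resets because the VF driver may already have started cleanup
	 * by the time we get here.
	 */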
443 if (!is_pfr)
444 wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);
445
446
447
448
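	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */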
449 if (!is_vflr) {
450
451 reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
452 reg |= VPGEN_VFRTRIG_VFSWR_M;
453 wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
454 }
455
456 reg_idx = (vf_abs_id) / 32;
457 bit_idx = (vf_abs_id) % 32;
458 wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
459 ice_flush(hw);
460
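	/* Wait for the VF's pending PCI transactions to complete before
	 * continuing with the reset.
	 */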
461 wr32(hw, PF_PCI_CIAA,
462 VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
463 for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
464 reg = rd32(hw, PF_PCI_CIAD);
465
466 if ((reg & VF_TRANS_PENDING_M) == 0)
467 break;
468
469 dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
470 udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
471 }
472}

/**
 * ice_vsi_manage_pvid - Enable or disable port VLAN for VSI
 * @vsi: the VSI to update
 * @pvid_info: VLAN ID and QoS used to set the PVID VSI context field
 * @enable: true for enable PVID false for disable
 */
480static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable)
481{
482 struct ice_hw *hw = &vsi->back->hw;
483 struct ice_aqc_vsi_props *info;
484 struct ice_vsi_ctx *ctxt;
485 enum ice_status status;
486 int ret = 0;
487
488 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
489 if (!ctxt)
490 return -ENOMEM;
491
492 ctxt->info = vsi->info;
493 info = &ctxt->info;
494 if (enable) {
495 info->vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
496 ICE_AQ_VSI_PVLAN_INSERT_PVID |
497 ICE_AQ_VSI_VLAN_EMOD_STR;
498 info->sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
499 } else {
500 info->vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING |
501 ICE_AQ_VSI_VLAN_MODE_ALL;
502 info->sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
503 }
504
505 info->pvid = cpu_to_le16(pvid_info);
506 info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
507 ICE_AQ_VSI_PROP_SW_VALID);
508
509 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
510 if (status) {
511 dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %s aq_err %s\n",
512 ice_stat_str(status),
513 ice_aq_str(hw->adminq.sq_last_status));
514 ret = -EIO;
515 goto out;
516 }
517
518 vsi->info.vlan_flags = info->vlan_flags;
519 vsi->info.sw_flags2 = info->sw_flags2;
520 vsi->info.pvid = info->pvid;
521out:
522 kfree(ctxt);
523 return ret;
524}
525
526
527
528
529
530static struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
531{
532 return vf->pf->hw.port_info;
533}

/**
 * ice_vf_vsi_setup - Set up a VF VSI
 * @vf: VF to setup VSI for
 *
 * Returns pointer to the successfully allocated VSI struct on success,
 * otherwise returns NULL on failure.
 */
542static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
543{
544 struct ice_port_info *pi = ice_vf_get_port_info(vf);
545 struct ice_pf *pf = vf->pf;
546 struct ice_vsi *vsi;
547
548 vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id);
549
550 if (!vsi) {
551 dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
552 ice_vf_invalidate_vsi(vf);
553 return NULL;
554 }
555
556 vf->lan_vsi_idx = vsi->idx;
557 vf->lan_vsi_num = vsi->vsi_num;
558
559 return vsi;
560}
561
562
563
564
565
566
567
568
569
570
571
572
573
574static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
575{
576 return pf->sriov_base_vector + vf->vf_id * pf->num_msix_per_vf;
577}
578
579
580
581
582
583
584
585
586static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf)
587{
588 struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
589 struct device *dev = ice_pf_to_dev(vf->pf);
590 u16 vlan_id = 0;
591 int err;
592
593 if (vf->port_vlan_info) {
594 err = ice_vsi_manage_pvid(vsi, vf->port_vlan_info, true);
595 if (err) {
596 dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
597 vf->vf_id, err);
598 return err;
599 }
600
601 vlan_id = vf->port_vlan_info & VLAN_VID_MASK;
602 }
603
604
605 err = ice_vsi_add_vlan(vsi, vlan_id, ICE_FWD_TO_VSI);
606 if (err) {
607 dev_err(dev, "failed to add %s VLAN %u filter for VF %u, error %d\n",
608 vf->port_vlan_info ? "port" : "", vlan_id, vf->vf_id,
609 err);
610 return err;
611 }
612
613 return 0;
614}
615
616
617
618
619
620
621
622
623static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
624{
625 struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
626 struct device *dev = ice_pf_to_dev(vf->pf);
627 enum ice_status status;
628 u8 broadcast[ETH_ALEN];
629
630 eth_broadcast_addr(broadcast);
631 status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
632 if (status) {
633 dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %s\n",
634 vf->vf_id, ice_stat_str(status));
635 return ice_status_to_errno(status);
636 }
637
638 vf->num_mac++;
639
640 if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) {
641 status = ice_fltr_add_mac(vsi, vf->dflt_lan_addr.addr,
642 ICE_FWD_TO_VSI);
643 if (status) {
644 dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %s\n",
645 &vf->dflt_lan_addr.addr[0], vf->vf_id,
646 ice_stat_str(status));
647 return ice_status_to_errno(status);
648 }
649 vf->num_mac++;
650 }
651
652 return 0;
653}
654
655
656
657
658
659static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
660{
661 if (vf->trusted)
662 set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
663 else
664 clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
665}
666
667
668
669
670
671
672
673
674
675static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
676{
677 int device_based_first_msix, device_based_last_msix;
678 int pf_based_first_msix, pf_based_last_msix, v;
679 struct ice_pf *pf = vf->pf;
680 int device_based_vf_id;
681 struct ice_hw *hw;
682 u32 reg;
683
684 hw = &pf->hw;
685 pf_based_first_msix = vf->first_vector_idx;
686 pf_based_last_msix = (pf_based_first_msix + pf->num_msix_per_vf) - 1;
687
688 device_based_first_msix = pf_based_first_msix +
689 pf->hw.func_caps.common_cap.msix_vector_first_id;
690 device_based_last_msix =
691 (device_based_first_msix + pf->num_msix_per_vf) - 1;
692 device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
693
694 reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
695 VPINT_ALLOC_FIRST_M) |
696 ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
697 VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
698 wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
699
700 reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
701 & VPINT_ALLOC_PCI_FIRST_M) |
702 ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
703 VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
704 wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
705
706
707 for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
708 reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
709 GLINT_VECT2FUNC_VF_NUM_M) |
710 ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
711 GLINT_VECT2FUNC_PF_NUM_M));
712 wr32(hw, GLINT_VECT2FUNC(v), reg);
713 }
714
715
716 wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
717}
718
719
720
721
722
723
724
725static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
726{
727 struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
728 struct device *dev = ice_pf_to_dev(vf->pf);
729 struct ice_hw *hw = &vf->pf->hw;
730 u32 reg;
731
732
733 wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
734
735
736 if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
737
738
739
740
741 reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
742 VPLAN_TX_QBASE_VFFIRSTQ_M) |
743 (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
744 VPLAN_TX_QBASE_VFNUMQ_M));
745 wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
746 } else {
747 dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
748 }
749
750
751 wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);
752
753
754 if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
755
756
757
758
759 reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
760 VPLAN_RX_QBASE_VFFIRSTQ_M) |
761 (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
762 VPLAN_RX_QBASE_VFNUMQ_M));
763 wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
764 } else {
765 dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
766 }
767}
768
769
770
771
772
773static void ice_ena_vf_mappings(struct ice_vf *vf)
774{
775 struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
776
777 ice_ena_vf_msix_mappings(vf);
778 ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
779}

/**
 * ice_determine_res - implement the resource allocation algorithm
 * @pf: pointer to the PF structure
 * @avail_res: available resources in the PF structure
 * @max_res: maximum resources that are allowed to be allocated
 * @min_res: minimum resources that are allowed to be allocated
 *
 * Start with max_res and halve the request until it can be satisfied for all
 * VFs from the available pool, or min_res is reached.
 */
791static int
792ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
793{
794 bool checked_min_res = false;
795 int res;
796
797
798
799
800
801
802
803
804 res = max_res;
805 while ((res >= min_res) && !checked_min_res) {
806 int num_all_res;
807
808 num_all_res = pf->num_alloc_vfs * res;
809 if (num_all_res <= avail_res)
810 return res;
811
812 if (res == min_res)
813 checked_min_res = true;
814
815 res = DIV_ROUND_UP(res, 2);
816 }
817 return 0;
818}
819
820
821
822
823
824
825int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
826{
827 struct ice_pf *pf;
828
829 if (!vf || !q_vector)
830 return -EINVAL;
831
832 pf = vf->pf;
833
834
835 return pf->sriov_base_vector + pf->num_msix_per_vf * vf->vf_id +
836 q_vector->v_idx + 1;
837}
838
839
840
841
842
843
844
845
846
847
848static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
849{
850 int i;
851
852 if (!res)
853 return -EINVAL;
854
855 for (i = res->num_entries - 1; i >= 0; i--)
856 if (res->list[i] & ICE_RES_VALID_BIT)
857 return i;
858
859 return 0;
860}

/**
 * ice_sriov_set_msix_res - Set the per-VF MSI-X resource tracking
 * @pf: pointer to PF structure
 * @num_msix_needed: number of MSI-X vectors needed for all SR-IOV VFs
 *
 * This function allows SR-IOV resources to be taken from the end of the PF's
 * allowed HW MSI-X vectors so that the irq_tracker will not be affected. We
 * just set the pf->sriov_base_vector and return success.
 *
 * If there are not enough resources available, return an error. This should
 * always be caught by ice_set_per_vf_res().
 *
 * Return 0 on success, and -EINVAL when there are not enough MSI-X vectors
 * in the PF's space available for SR-IOV.
 */
877static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
878{
879 u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
880 int vectors_used = pf->irq_tracker->num_entries;
881 int sriov_base_vector;
882
883 sriov_base_vector = total_vectors - num_msix_needed;
884
885
886
887
888 if (sriov_base_vector < vectors_used)
889 return -EINVAL;
890
891 pf->sriov_base_vector = sriov_base_vector;
892
893 return 0;
894}

/**
 * ice_set_per_vf_res - check if vectors and queues are available
 * @pf: pointer to the PF structure
 *
 * First, determine HW interrupts from the common pool. If we allocate fewer
 * VFs, we grab more vectors per VF. Otherwise, we limit vectors per VF based
 * on the available MSI-X tiers (medium, small, or the minimum per VF).
 *
 * Second, determine the number of queue pairs per VF by starting with a
 * pre-defined maximum each VF supports and halving as needed based on what is
 * currently available.
 *
 * Lastly, set the queue and MSI-X VF variables tracked by the PF so they can
 * be used during VF creation. The same values are used for every VF because
 * all VFs are treated equally.
 */
917static int ice_set_per_vf_res(struct ice_pf *pf)
918{
919 int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
920 int msix_avail_per_vf, msix_avail_for_sriov;
921 struct device *dev = ice_pf_to_dev(pf);
922 u16 num_msix_per_vf, num_txq, num_rxq;
923
924 if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
925 return -EINVAL;
926
927
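	/* determine MSI-X resources per VF */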
928 msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
929 pf->irq_tracker->num_entries;
930 msix_avail_per_vf = msix_avail_for_sriov / pf->num_alloc_vfs;
931 if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
932 num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
933 } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
934 num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
935 } else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
936 num_msix_per_vf = ICE_MIN_INTR_PER_VF;
937 } else {
938 dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
939 msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
940 pf->num_alloc_vfs);
941 return -EIO;
942 }
943
944
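	/* determine queue resources per VF */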
945 num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf),
946 min_t(u16,
947 num_msix_per_vf - ICE_NONQ_VECS_VF,
948 ICE_MAX_RSS_QS_PER_VF),
949 ICE_MIN_QS_PER_VF);
950
951 num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf),
952 min_t(u16,
953 num_msix_per_vf - ICE_NONQ_VECS_VF,
954 ICE_MAX_RSS_QS_PER_VF),
955 ICE_MIN_QS_PER_VF);
956
957 if (!num_txq || !num_rxq) {
958 dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
959 ICE_MIN_QS_PER_VF, pf->num_alloc_vfs);
960 return -EIO;
961 }
962
963 if (ice_sriov_set_msix_res(pf, num_msix_per_vf * pf->num_alloc_vfs)) {
964 dev_err(dev, "Unable to set MSI-X resources for %d VFs\n",
965 pf->num_alloc_vfs);
966 return -EINVAL;
967 }
968
969
970 pf->num_qps_per_vf = min_t(int, num_txq, num_rxq);
971 pf->num_msix_per_vf = num_msix_per_vf;
972 dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
973 pf->num_alloc_vfs, pf->num_msix_per_vf, pf->num_qps_per_vf);
974
975 return 0;
976}
977
978
979
980
981
982static void ice_clear_vf_reset_trigger(struct ice_vf *vf)
983{
984 struct ice_hw *hw = &vf->pf->hw;
985 u32 reg;
986
987 reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
988 reg &= ~VPGEN_VFRTRIG_VFSWR_M;
989 wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
990 ice_flush(hw);
991}

/**
 * ice_vf_set_vsi_promisc - set given VF VSI to given promiscuous mode(s)
 * @vf: pointer to the VF info
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 * @rm_promisc: promisc flag request from the VF to remove or add filter
 *
 * This function configures VF VSI promiscuous mode, based on the VF requests,
 * for Unicast, Multicast and VLAN
 */
1003static enum ice_status
1004ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
1005 bool rm_promisc)
1006{
1007 struct ice_pf *pf = vf->pf;
1008 enum ice_status status = 0;
1009 struct ice_hw *hw;
1010
1011 hw = &pf->hw;
1012 if (vsi->num_vlan) {
1013 status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
1014 rm_promisc);
1015 } else if (vf->port_vlan_info) {
1016 if (rm_promisc)
1017 status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
1018 vf->port_vlan_info);
1019 else
1020 status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
1021 vf->port_vlan_info);
1022 } else {
1023 if (rm_promisc)
1024 status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
1025 0);
1026 else
1027 status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
1028 0);
1029 }
1030
1031 return status;
1032}
1033
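/**
 * ice_vf_clear_counters - Clear the VF's MAC/VLAN/MDD counters
 * @vf: the VF whose counters should be cleared
 */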
1034static void ice_vf_clear_counters(struct ice_vf *vf)
1035{
1036 struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
1037
1038 vf->num_mac = 0;
1039 vsi->num_vlan = 0;
1040 memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
1041 memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
1042}
1043
1044
1045
1046
1047
1048
1049
1050
1051static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
1052{
1053 ice_vf_clear_counters(vf);
1054 ice_clear_vf_reset_trigger(vf);
1055}
1056
1057
1058
1059
1060
1061static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
1062{
1063 struct device *dev = ice_pf_to_dev(vf->pf);
1064
1065 ice_vf_set_host_trust_cfg(vf);
1066
1067 if (ice_vf_rebuild_host_mac_cfg(vf))
1068 dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
1069 vf->vf_id);
1070
1071 if (ice_vf_rebuild_host_vlan_cfg(vf))
1072 dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
1073 vf->vf_id);
1074}
1075
1076
1077
1078
1079
1080
1081
1082
1083static int ice_vf_rebuild_vsi_with_release(struct ice_vf *vf)
1084{
1085 ice_vf_vsi_release(vf);
1086 if (!ice_vf_vsi_setup(vf))
1087 return -ENOMEM;
1088
1089 return 0;
1090}
1091
1092
1093
1094
1095
1096
1097
1098
1099static int ice_vf_rebuild_vsi(struct ice_vf *vf)
1100{
1101 struct ice_pf *pf = vf->pf;
1102 struct ice_vsi *vsi;
1103
1104 vsi = pf->vsi[vf->lan_vsi_idx];
1105
1106 if (ice_vsi_rebuild(vsi, true)) {
1107 dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
1108 vf->vf_id);
1109 return -EIO;
1110 }
1111
1112
1113
1114 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
1115 vf->lan_vsi_num = vsi->vsi_num;
1116
1117 return 0;
1118}
1119
1120
1121
1122
1123
1124
1125
1126
1127static void ice_vf_set_initialized(struct ice_vf *vf)
1128{
1129 ice_set_vf_state_qs_dis(vf);
1130 clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
1131 clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
1132 clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
1133 set_bit(ICE_VF_STATE_INIT, vf->vf_states);
1134}
1135
1136
1137
1138
1139
1140static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
1141{
1142 struct ice_pf *pf = vf->pf;
1143 struct ice_hw *hw;
1144
1145 hw = &pf->hw;
1146
1147 ice_vf_rebuild_host_cfg(vf);
1148
1149 ice_vf_set_initialized(vf);
1150 ice_ena_vf_mappings(vf);
1151 wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
1152}

/**
 * ice_reset_all_vfs - reset all allocated VFs in one go
 * @pf: pointer to the PF structure
 * @is_vflr: true if VFLR was issued, false if not
 *
 * First, tell the hardware to reset each VF, then do all the waiting in one
 * chunk, and finally finish restoring each VF after the wait. This is useful
 * during PF routines which need to reset all VFs, as otherwise it must perform
 * these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 */
1166bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
1167{
1168 struct device *dev = ice_pf_to_dev(pf);
1169 struct ice_hw *hw = &pf->hw;
1170 struct ice_vf *vf;
1171 int v, i;
1172
1173
1174 if (!pf->num_alloc_vfs)
1175 return false;
1176
1177
1178 if (test_and_set_bit(__ICE_VF_DIS, pf->state))
1179 return false;
1180
1181
1182 ice_for_each_vf(pf, v)
1183 ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);
1184
1185
1186
1187
1188
1189
1190
1191 for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
1192
1193 while (v < pf->num_alloc_vfs) {
1194 u32 reg;
1195
1196 vf = &pf->vf[v];
1197 reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
1198 if (!(reg & VPGEN_VFRSTAT_VFRD_M)) {
1199
1200 usleep_range(10, 20);
1201 break;
1202 }
1203
1204
1205
1206
1207 v++;
1208 }
1209 }
1210
1211
1212
1213
1214 if (v < pf->num_alloc_vfs)
1215 dev_warn(dev, "VF reset check timeout\n");
1216
1217
1218 ice_for_each_vf(pf, v) {
1219 vf = &pf->vf[v];
1220
1221 ice_vf_pre_vsi_rebuild(vf);
1222 ice_vf_rebuild_vsi(vf);
1223 ice_vf_post_vsi_rebuild(vf);
1224 }
1225
1226 ice_flush(hw);
1227 clear_bit(__ICE_VF_DIS, pf->state);
1228
1229 return true;
1230}
1231
1232
1233
1234
1235
1236
1237
1238static bool ice_is_vf_disabled(struct ice_vf *vf)
1239{
1240 struct ice_pf *pf = vf->pf;
1241
1242
1243
1244
1245
1246
1247 return (test_bit(__ICE_VF_DIS, pf->state) ||
1248 test_bit(ICE_VF_STATE_DIS, vf->vf_states));
1249}

/**
 * ice_reset_vf - Reset a particular VF
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 *
 * Returns true if the VF is currently in reset, resets successfully, or resets
 * are disabled and false otherwise.
 */
1259bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
1260{
1261 struct ice_pf *pf = vf->pf;
1262 struct ice_vsi *vsi;
1263 struct device *dev;
1264 struct ice_hw *hw;
1265 bool rsd = false;
1266 u8 promisc_m;
1267 u32 reg;
1268 int i;
1269
1270 dev = ice_pf_to_dev(pf);
1271
1272 if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) {
1273 dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
1274 vf->vf_id);
1275 return true;
1276 }
1277
1278 if (ice_is_vf_disabled(vf)) {
		dev_dbg(dev, "VF %d is already disabled, no need to reset it; telling VM all is fine\n",
			vf->vf_id);
1281 return true;
1282 }
1283
1284
1285 set_bit(ICE_VF_STATE_DIS, vf->vf_states);
1286 ice_trigger_vf_reset(vf, is_vflr, false);
1287
1288 vsi = pf->vsi[vf->lan_vsi_idx];
1289
1290 if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
1291 ice_dis_vf_qs(vf);
1292
1293
1294
1295
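	/* Call Disable LAN Tx queue AQ whether or not queues are
	 * enabled. This is needed for successful completion of VFR.
	 */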
1296 ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
1297 NULL, ICE_VF_RESET, vf->vf_id, NULL);
1298
1299 hw = &pf->hw;
1300
1301
1302
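	/* poll VPGEN_VFRSTAT reg to make sure that the reset is complete */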
1303 for (i = 0; i < 10; i++) {
1304
1305
1306
1307
1308 reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
1309 if (reg & VPGEN_VFRSTAT_VFRD_M) {
1310 rsd = true;
1311 break;
1312 }
1313
1314
1315 usleep_range(10, 20);
1316 }
1317
1318
1319
1320
1321 if (!rsd)
1322 dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);
1323
1324
1325
1326
1327 if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
1328 test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
1329 if (vf->port_vlan_info || vsi->num_vlan)
1330 promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
1331 else
1332 promisc_m = ICE_UCAST_PROMISC_BITS;
1333
1334 vsi = pf->vsi[vf->lan_vsi_idx];
1335 if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
1336 dev_err(dev, "disabling promiscuous mode failed\n");
1337 }
1338
1339 ice_vf_pre_vsi_rebuild(vf);
1340 ice_vf_rebuild_vsi_with_release(vf);
1341 ice_vf_post_vsi_rebuild(vf);
1342
1343 return true;
1344}
1345
1346
1347
1348
1349
1350void ice_vc_notify_link_state(struct ice_pf *pf)
1351{
1352 int i;
1353
1354 ice_for_each_vf(pf, i)
1355 ice_vc_notify_vf_link_state(&pf->vf[i]);
1356}
1357
1358
1359
1360
1361
1362
1363
1364void ice_vc_notify_reset(struct ice_pf *pf)
1365{
1366 struct virtchnl_pf_event pfe;
1367
1368 if (!pf->num_alloc_vfs)
1369 return;
1370
1371 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
1372 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
1373 ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
1374 (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
1375}
1376
1377
1378
1379
1380
1381static void ice_vc_notify_vf_reset(struct ice_vf *vf)
1382{
1383 struct virtchnl_pf_event pfe;
1384 struct ice_pf *pf;
1385
1386 if (!vf)
1387 return;
1388
1389 pf = vf->pf;
1390 if (ice_validate_vf_id(pf, vf->vf_id))
1391 return;
1392
1393
1394
1395
1396 if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
1397 !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
1398 test_bit(ICE_VF_STATE_DIS, vf->vf_states))
1399 return;
1400
1401 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
1402 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
1403 ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
1404 VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
1405 NULL);
1406}

/**
 * ice_init_vf_vsi_res - initialize/setup VF VSI resources
 * @vf: VF to initialize/setup the VSI for
 *
 * This function creates a VSI for the VF, adds a VLAN 0 filter and a broadcast
 * MAC filter, and is only used during initial VF creation.
 */
1415static int ice_init_vf_vsi_res(struct ice_vf *vf)
1416{
1417 struct ice_pf *pf = vf->pf;
1418 u8 broadcast[ETH_ALEN];
1419 enum ice_status status;
1420 struct ice_vsi *vsi;
1421 struct device *dev;
1422 int err;
1423
1424 vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
1425
1426 dev = ice_pf_to_dev(pf);
1427 vsi = ice_vf_vsi_setup(vf);
1428 if (!vsi)
1429 return -ENOMEM;
1430
1431 err = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
1432 if (err) {
1433 dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
1434 vf->vf_id);
1435 goto release_vsi;
1436 }
1437
1438 eth_broadcast_addr(broadcast);
1439 status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
1440 if (status) {
1441 dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %s\n",
1442 vf->vf_id, ice_stat_str(status));
1443 err = ice_status_to_errno(status);
1444 goto release_vsi;
1445 }
1446
1447 vf->num_mac = 1;
1448
1449 return 0;
1450
1451release_vsi:
1452 ice_vf_vsi_release(vf);
1453 return err;
1454}
1455
1456
1457
1458
1459
1460static int ice_start_vfs(struct ice_pf *pf)
1461{
1462 struct ice_hw *hw = &pf->hw;
1463 int retval, i;
1464
1465 ice_for_each_vf(pf, i) {
1466 struct ice_vf *vf = &pf->vf[i];
1467
1468 ice_clear_vf_reset_trigger(vf);
1469
1470 retval = ice_init_vf_vsi_res(vf);
1471 if (retval) {
1472 dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
1473 vf->vf_id, retval);
1474 goto teardown;
1475 }
1476
1477 set_bit(ICE_VF_STATE_INIT, vf->vf_states);
1478 ice_ena_vf_mappings(vf);
1479 wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
1480 }
1481
1482 ice_flush(hw);
1483 return 0;
1484
1485teardown:
1486 for (i = i - 1; i >= 0; i--) {
1487 struct ice_vf *vf = &pf->vf[i];
1488
1489 ice_dis_vf_mappings(vf);
1490 ice_vf_vsi_release(vf);
1491 }
1492
1493 return retval;
1494}
1495
1496
1497
1498
1499
1500static void ice_set_dflt_settings_vfs(struct ice_pf *pf)
1501{
1502 int i;
1503
1504 ice_for_each_vf(pf, i) {
1505 struct ice_vf *vf = &pf->vf[i];
1506
1507 vf->pf = pf;
1508 vf->vf_id = i;
1509 vf->vf_sw_id = pf->first_sw;
1510
1511 set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vf->vf_caps);
1512 vf->spoofchk = true;
1513 vf->num_vf_qs = pf->num_qps_per_vf;
1514 }
1515}
1516
1517
1518
1519
1520
1521
1522static int ice_alloc_vfs(struct ice_pf *pf, int num_vfs)
1523{
1524 struct ice_vf *vfs;
1525
1526 vfs = devm_kcalloc(ice_pf_to_dev(pf), num_vfs, sizeof(*vfs),
1527 GFP_KERNEL);
1528 if (!vfs)
1529 return -ENOMEM;
1530
1531 pf->vf = vfs;
1532 pf->num_alloc_vfs = num_vfs;
1533
1534 return 0;
1535}

/**
 * ice_ena_vfs - enable VFs so they are ready to be used
 * @pf: pointer to the PF structure
 * @num_vfs: number of VFs to enable
 */
1542static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
1543{
1544 struct device *dev = ice_pf_to_dev(pf);
1545 struct ice_hw *hw = &pf->hw;
1546 int ret;
1547
1548
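	/* Disable global interrupt 0 so we don't try to handle the VFLR. */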
1549 wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
1550 ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
1551 set_bit(__ICE_OICR_INTR_DIS, pf->state);
1552 ice_flush(hw);
1553
1554 ret = pci_enable_sriov(pf->pdev, num_vfs);
1555 if (ret) {
1556 pf->num_alloc_vfs = 0;
1557 goto err_unroll_intr;
1558 }
1559
1560 ret = ice_alloc_vfs(pf, num_vfs);
1561 if (ret)
1562 goto err_pci_disable_sriov;
1563
1564 if (ice_set_per_vf_res(pf)) {
		dev_err(dev, "Not enough resources for %d VFs, try with fewer VFs\n",
			num_vfs);
1567 ret = -ENOSPC;
1568 goto err_unroll_sriov;
1569 }
1570
1571 ice_set_dflt_settings_vfs(pf);
1572
1573 if (ice_start_vfs(pf)) {
1574 dev_err(dev, "Failed to start VF(s)\n");
1575 ret = -EAGAIN;
1576 goto err_unroll_sriov;
1577 }
1578
1579 clear_bit(__ICE_VF_DIS, pf->state);
1580 return 0;
1581
1582err_unroll_sriov:
1583 devm_kfree(dev, pf->vf);
1584 pf->vf = NULL;
1585 pf->num_alloc_vfs = 0;
1586err_pci_disable_sriov:
1587 pci_disable_sriov(pf->pdev);
1588err_unroll_intr:
1589
1590 ice_irq_dynamic_ena(hw, NULL, NULL);
1591 clear_bit(__ICE_OICR_INTR_DIS, pf->state);
1592 return ret;
1593}
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606static bool ice_pf_state_is_nominal(struct ice_pf *pf)
1607{
1608 DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };
1609
1610 if (!pf)
1611 return false;
1612
1613 bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);
1614 if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS))
1615 return false;
1616
1617 return true;
1618}
1619
1620
1621
1622
1623
1624
1625
1626
1627static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
1628{
1629 int pre_existing_vfs = pci_num_vf(pf->pdev);
1630 struct device *dev = ice_pf_to_dev(pf);
1631 int err;
1632
1633 if (pre_existing_vfs && pre_existing_vfs != num_vfs)
1634 ice_free_vfs(pf);
1635 else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
1636 return 0;
1637
1638 if (num_vfs > pf->num_vfs_supported) {
1639 dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
1640 num_vfs, pf->num_vfs_supported);
1641 return -EOPNOTSUPP;
1642 }
1643
1644 dev_info(dev, "Enabling %d VFs\n", num_vfs);
1645 err = ice_ena_vfs(pf, num_vfs);
1646 if (err) {
1647 dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
1648 return err;
1649 }
1650
1651 set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
1652 return 0;
1653}
1654
1655
1656
1657
1658
1659static int ice_check_sriov_allowed(struct ice_pf *pf)
1660{
1661 struct device *dev = ice_pf_to_dev(pf);
1662
1663 if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
1664 dev_err(dev, "This device is not capable of SR-IOV\n");
1665 return -EOPNOTSUPP;
1666 }
1667
1668 if (ice_is_safe_mode(pf)) {
1669 dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
1670 return -EOPNOTSUPP;
1671 }
1672
1673 if (!ice_pf_state_is_nominal(pf)) {
1674 dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
1675 return -EBUSY;
1676 }
1677
1678 return 0;
1679}

/**
 * ice_sriov_configure - Enable or change number of VFs via sysfs
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate or 0 to free VFs
 *
 * This function is called when the user updates the number of VFs in sysfs. On
 * success return whatever num_vfs was set to by the caller. Return an error if
 * not enough resources are available.
 */
1690int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
1691{
1692 struct ice_pf *pf = pci_get_drvdata(pdev);
1693 struct device *dev = ice_pf_to_dev(pf);
1694 int err;
1695
1696 err = ice_check_sriov_allowed(pf);
1697 if (err)
1698 return err;
1699
1700 if (!num_vfs) {
1701 if (!pci_vfs_assigned(pdev)) {
1702 ice_free_vfs(pf);
1703 return 0;
1704 }
1705
1706 dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
1707 return -EBUSY;
1708 }
1709
1710 err = ice_pci_sriov_ena(pf, num_vfs);
1711 if (err)
1712 return err;
1713
1714 return num_vfs;
1715}
1716
1717
1718
1719
1720
1721
1722
1723
1724void ice_process_vflr_event(struct ice_pf *pf)
1725{
1726 struct ice_hw *hw = &pf->hw;
1727 unsigned int vf_id;
1728 u32 reg;
1729
1730 if (!test_and_clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
1731 !pf->num_alloc_vfs)
1732 return;
1733
1734 ice_for_each_vf(pf, vf_id) {
1735 struct ice_vf *vf = &pf->vf[vf_id];
1736 u32 reg_idx, bit_idx;
1737
1738 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
1739 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
1740
1741 reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
1742 if (reg & BIT(bit_idx))
1743
1744 ice_reset_vf(vf, true);
1745 }
1746}
1747
1748
1749
1750
1751
1752static void ice_vc_reset_vf(struct ice_vf *vf)
1753{
1754 ice_vc_notify_vf_reset(vf);
1755 ice_reset_vf(vf, false);
1756}
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
1767{
1768 unsigned int vf_id;
1769
1770 ice_for_each_vf(pf, vf_id) {
1771 struct ice_vf *vf = &pf->vf[vf_id];
1772 struct ice_vsi *vsi;
1773 u16 rxq_idx;
1774
1775 vsi = pf->vsi[vf->lan_vsi_idx];
1776
1777 ice_for_each_rxq(vsi, rxq_idx)
1778 if (vsi->rxq_map[rxq_idx] == pfq)
1779 return vf;
1780 }
1781
1782 return NULL;
1783}
1784
1785
1786
1787
1788
1789
1790static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
1791{
1792 return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
1793}
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804void
1805ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
1806{
1807 u32 gldcb_rtctq, queue;
1808 struct ice_vf *vf;
1809
1810 gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
1811 dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
1812
1813
1814 queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
1815 GLDCB_RTCTQ_RXQNUM_S;
1816
1817 vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
1818 if (!vf)
1819 return;
1820
1821 ice_vc_reset_vf(vf);
1822}

/**
 * ice_vc_send_msg_to_vf - Send message to VF
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to VF
 */
1834static int
1835ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
1836 enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
1837{
1838 enum ice_status aq_ret;
1839 struct device *dev;
1840 struct ice_pf *pf;
1841
1842 if (!vf)
1843 return -EINVAL;
1844
1845 pf = vf->pf;
1846 if (ice_validate_vf_id(pf, vf->vf_id))
1847 return -EINVAL;
1848
1849 dev = ice_pf_to_dev(pf);
1850
1851
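	/* single place to detect unsuccessful return values */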
1852 if (v_retval) {
1853 vf->num_inval_msgs++;
1854 dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id,
1855 v_opcode, v_retval);
1856 if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
1857 dev_err(dev, "Number of invalid messages exceeded for VF %d\n",
1858 vf->vf_id);
1859 dev_err(dev, "Use PF Control I/F to enable the VF\n");
1860 set_bit(ICE_VF_STATE_DIS, vf->vf_states);
1861 return -EIO;
1862 }
1863 } else {
1864 vf->num_valid_msgs++;
1865
1866 vf->num_inval_msgs = 0;
1867 }
1868
1869 aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
1870 msg, msglen, NULL);
1871 if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
1872 dev_info(dev, "Unable to send the message to VF %d ret %s aq_err %s\n",
1873 vf->vf_id, ice_stat_str(aq_ret),
1874 ice_aq_str(pf->hw.mailboxq.sq_last_status));
1875 return -EIO;
1876 }
1877
1878 return 0;
1879}
1880
1881
1882
1883
1884
1885
1886
1887
1888static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
1889{
1890 struct virtchnl_version_info info = {
1891 VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
1892 };
1893
1894 vf->vf_ver = *(struct virtchnl_version_info *)msg;
1895
1896 if (VF_IS_V10(&vf->vf_ver))
1897 info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
1898
1899 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
1900 VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
1901 sizeof(struct virtchnl_version_info));
1902}

/**
 * ice_vc_get_vf_res_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request its resources
 */
1911static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
1912{
1913 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1914 struct virtchnl_vf_resource *vfres = NULL;
1915 struct ice_pf *pf = vf->pf;
1916 struct ice_vsi *vsi;
1917 int len = 0;
1918 int ret;
1919
1920 if (ice_check_vf_init(pf, vf)) {
1921 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1922 goto err;
1923 }
1924
1925 len = sizeof(struct virtchnl_vf_resource);
1926
1927 vfres = kzalloc(len, GFP_KERNEL);
1928 if (!vfres) {
1929 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1930 len = 0;
1931 goto err;
1932 }
1933 if (VF_IS_V11(&vf->vf_ver))
1934 vf->driver_caps = *(u32 *)msg;
1935 else
1936 vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
1937 VIRTCHNL_VF_OFFLOAD_RSS_REG |
1938 VIRTCHNL_VF_OFFLOAD_VLAN;
1939
1940 vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
1941 vsi = pf->vsi[vf->lan_vsi_idx];
1942 if (!vsi) {
1943 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1944 goto err;
1945 }
1946
1947 if (!vsi->info.pvid)
1948 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
1949
1950 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
1951 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
1952 } else {
1953 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
1954 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
1955 else
1956 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
1957 }
1958
1959 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1960 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
1961
1962 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
1963 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
1964
1965 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
1966 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
1967
1968 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
1969 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
1970
1971 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1972 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
1973
1974 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
1975 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
1976
1977 if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
1978 vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
1979
1980 vfres->num_vsis = 1;
1981
1982 vfres->num_queue_pairs = vsi->num_txq;
1983 vfres->max_vectors = pf->num_msix_per_vf;
1984 vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
1985 vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
1986
1987 vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
1988 vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
1989 vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
1990 ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
1991 vf->dflt_lan_addr.addr);
1992
1993
1994 vf->driver_caps = vfres->vf_cap_flags;
1995
1996 set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
1997
1998err:
1999
2000 ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
2001 (u8 *)vfres, len);
2002
2003 kfree(vfres);
2004 return ret;
2005}
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015static void ice_vc_reset_vf_msg(struct ice_vf *vf)
2016{
2017 if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
2018 ice_reset_vf(vf, false);
2019}
2020
2021
2022
2023
2024
2025
2026
2027
2028static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
2029{
2030 int i;
2031
2032 ice_for_each_vsi(pf, i)
2033 if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
2034 return pf->vsi[i];
2035
2036 return NULL;
2037}
2038
2039
2040
2041
2042
2043
2044
2045
2046static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
2047{
2048 struct ice_pf *pf = vf->pf;
2049 struct ice_vsi *vsi;
2050
2051 vsi = ice_find_vsi_from_id(pf, vsi_id);
2052
2053 return (vsi && (vsi->vf_id == vf->vf_id));
2054}
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
2065{
2066 struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
2067
2068 return (vsi && (qid < vsi->alloc_txq));
2069}
2070
2071
2072
2073
2074
2075
2076
2077
2078static bool ice_vc_isvalid_ring_len(u16 ring_len)
2079{
2080 return ring_len == 0 ||
2081 (ring_len >= ICE_MIN_NUM_DESC &&
2082 ring_len <= ICE_MAX_NUM_DESC &&
2083 !(ring_len % ICE_REQ_DESC_MULTIPLE));
2084}
2085
2086
2087
2088
2089
2090
2091
2092
2093static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
2094{
2095 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2096 struct virtchnl_rss_key *vrk =
2097 (struct virtchnl_rss_key *)msg;
2098 struct ice_pf *pf = vf->pf;
2099 struct ice_vsi *vsi;
2100
2101 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2102 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2103 goto error_param;
2104 }
2105
2106 if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
2107 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2108 goto error_param;
2109 }
2110
2111 if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
2112 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2113 goto error_param;
2114 }
2115
2116 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
2117 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2118 goto error_param;
2119 }
2120
2121 vsi = pf->vsi[vf->lan_vsi_idx];
2122 if (!vsi) {
2123 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2124 goto error_param;
2125 }
2126
2127 if (ice_set_rss(vsi, vrk->key, NULL, 0))
2128 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2129error_param:
2130 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
2131 NULL, 0);
2132}
2133
2134
2135
2136
2137
2138
2139
2140
2141static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
2142{
2143 struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
2144 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2145 struct ice_pf *pf = vf->pf;
2146 struct ice_vsi *vsi;
2147
2148 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2149 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2150 goto error_param;
2151 }
2152
2153 if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
2154 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2155 goto error_param;
2156 }
2157
2158 if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
2159 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2160 goto error_param;
2161 }
2162
2163 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
2164 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2165 goto error_param;
2166 }
2167
2168 vsi = pf->vsi[vf->lan_vsi_idx];
2169 if (!vsi) {
2170 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2171 goto error_param;
2172 }
2173
2174 if (ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
2175 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2176error_param:
2177 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
2178 NULL, 0);
2179}
2180
2181
2182
2183
2184
2185
2186
2187
2188static void ice_wait_on_vf_reset(struct ice_vf *vf)
2189{
2190 int i;
2191
2192 for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
2193 if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
2194 break;
2195 msleep(ICE_MAX_VF_RESET_SLEEP_MS);
2196 }
2197}
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207static int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
2208{
2209 struct ice_pf *pf;
2210
2211 ice_wait_on_vf_reset(vf);
2212
2213 if (ice_is_vf_disabled(vf))
2214 return -EINVAL;
2215
2216 pf = vf->pf;
2217 if (ice_check_vf_init(pf, vf))
2218 return -EBUSY;
2219
2220 return 0;
2221}

/**
 * ice_set_vf_spoofchk
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ena: flag to enable or disable feature
 *
 * Enable or disable VF spoof checking
 */
2231int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
2232{
2233 struct ice_netdev_priv *np = netdev_priv(netdev);
2234 struct ice_pf *pf = np->vsi->back;
2235 struct ice_vsi_ctx *ctx;
2236 struct ice_vsi *vf_vsi;
2237 enum ice_status status;
2238 struct device *dev;
2239 struct ice_vf *vf;
2240 int ret;
2241
2242 dev = ice_pf_to_dev(pf);
2243 if (ice_validate_vf_id(pf, vf_id))
2244 return -EINVAL;
2245
2246 vf = &pf->vf[vf_id];
2247 ret = ice_check_vf_ready_for_cfg(vf);
2248 if (ret)
2249 return ret;
2250
2251 vf_vsi = pf->vsi[vf->lan_vsi_idx];
2252 if (!vf_vsi) {
2253 netdev_err(netdev, "VSI %d for VF %d is null\n",
2254 vf->lan_vsi_idx, vf->vf_id);
2255 return -EINVAL;
2256 }
2257
2258 if (vf_vsi->type != ICE_VSI_VF) {
		netdev_err(netdev, "Type %d of VSI %d for VF %d is not ICE_VSI_VF\n",
			   vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
2261 return -ENODEV;
2262 }
2263
2264 if (ena == vf->spoofchk) {
2265 dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
2266 return 0;
2267 }
2268
2269 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2270 if (!ctx)
2271 return -ENOMEM;
2272
2273 ctx->info.sec_flags = vf_vsi->info.sec_flags;
2274 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
2275 if (ena) {
2276 ctx->info.sec_flags |=
2277 ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
2278 (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2279 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
2280 } else {
2281 ctx->info.sec_flags &=
2282 ~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
2283 (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2284 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
2285 }
2286
2287 status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
2288 if (status) {
		dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d, error %s\n",
			ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num,
			ice_stat_str(status));
2292 ret = -EIO;
2293 goto out;
2294 }
2295
2296
2297 vf_vsi->info.sec_flags = ctx->info.sec_flags;
2298 vf->spoofchk = ena;
2299
2300out:
2301 kfree(ctx);
2302 return ret;
2303}
2304
2305
2306
2307
2308
2309
2310
2311
2312bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
2313{
2314 int vf_idx;
2315
2316 ice_for_each_vf(pf, vf_idx) {
2317 struct ice_vf *vf = &pf->vf[vf_idx];
2318
2319
2320 if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
2321 test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
2322 return true;
2323 }
2324
2325 return false;
2326}

/**
 * ice_vc_cfg_promiscuous_mode_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure VF VSIs promiscuous mode
 */
2335static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
2336{
2337 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2338 struct virtchnl_promisc_info *info =
2339 (struct virtchnl_promisc_info *)msg;
2340 struct ice_pf *pf = vf->pf;
2341 struct ice_vsi *vsi;
2342 struct device *dev;
2343 bool rm_promisc;
2344 int ret = 0;
2345
2346 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2347 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2348 goto error_param;
2349 }
2350
2351 if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) {
2352 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2353 goto error_param;
2354 }
2355
2356 vsi = pf->vsi[vf->lan_vsi_idx];
2357 if (!vsi) {
2358 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2359 goto error_param;
2360 }
2361
2362 dev = ice_pf_to_dev(pf);
2363 if (!test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2364 dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
2365 vf->vf_id);
2366
2367 goto error_param;
2368 }
2369
2370 rm_promisc = !(info->flags & FLAG_VF_UNICAST_PROMISC) &&
2371 !(info->flags & FLAG_VF_MULTICAST_PROMISC);
2372
2373 if (vsi->num_vlan || vf->port_vlan_info) {
2374 struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
2375 struct net_device *pf_netdev;
2376
2377 if (!pf_vsi) {
2378 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2379 goto error_param;
2380 }
2381
2382 pf_netdev = pf_vsi->netdev;
2383
2384 ret = ice_set_vf_spoofchk(pf_netdev, vf->vf_id, rm_promisc);
2385 if (ret) {
2386 dev_err(dev, "Failed to update spoofchk to %s for VF %d VSI %d when setting promiscuous mode\n",
2387 rm_promisc ? "ON" : "OFF", vf->vf_id,
2388 vsi->vsi_num);
2389 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2390 }
2391
2392 ret = ice_cfg_vlan_pruning(vsi, true, !rm_promisc);
2393 if (ret) {
2394 dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
2395 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2396 goto error_param;
2397 }
2398 }
2399
2400 if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
2401 bool set_dflt_vsi = !!(info->flags & FLAG_VF_UNICAST_PROMISC);
2402
2403 if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw))
2404
2405
2406
2407 ret = ice_set_dflt_vsi(pf->first_sw, vsi);
2408 else if (!set_dflt_vsi &&
2409 ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
2410
2411
2412
2413 ret = ice_clear_dflt_vsi(pf->first_sw);
2414
2415 if (ret) {
2416 dev_err(dev, "%sable VF %d as the default VSI failed, error %d\n",
2417 set_dflt_vsi ? "en" : "dis", vf->vf_id, ret);
2418 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2419 goto error_param;
2420 }
2421 } else {
2422 enum ice_status status;
2423 u8 promisc_m;
2424
2425 if (info->flags & FLAG_VF_UNICAST_PROMISC) {
2426 if (vf->port_vlan_info || vsi->num_vlan)
2427 promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
2428 else
2429 promisc_m = ICE_UCAST_PROMISC_BITS;
2430 } else if (info->flags & FLAG_VF_MULTICAST_PROMISC) {
2431 if (vf->port_vlan_info || vsi->num_vlan)
2432 promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
2433 else
2434 promisc_m = ICE_MCAST_PROMISC_BITS;
2435 } else {
2436 if (vf->port_vlan_info || vsi->num_vlan)
2437 promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
2438 else
2439 promisc_m = ICE_UCAST_PROMISC_BITS;
2440 }
2441
2442
2443
2444
2445 status = ice_vf_set_vsi_promisc(vf, vsi, promisc_m, rm_promisc);
2446 if (status) {
2447 dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed, error: %s\n",
2448 rm_promisc ? "dis" : "en", vf->vf_id,
2449 ice_stat_str(status));
2450 v_ret = ice_err_to_virt_err(status);
2451 goto error_param;
2452 } else {
2453 dev_dbg(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d succeeded\n",
2454 rm_promisc ? "dis" : "en", vf->vf_id);
2455 }
2456 }
2457
2458 if (info->flags & FLAG_VF_MULTICAST_PROMISC)
2459 set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
2460 else
2461 clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
2462
2463 if (info->flags & FLAG_VF_UNICAST_PROMISC)
2464 set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
2465 else
2466 clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
2467
2468error_param:
2469 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
2470 v_ret, NULL, 0);
2471}
2472
2473
2474
2475
2476
2477
2478
2479
2480static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
2481{
2482 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2483 struct virtchnl_queue_select *vqs =
2484 (struct virtchnl_queue_select *)msg;
2485 struct ice_eth_stats stats = { 0 };
2486 struct ice_pf *pf = vf->pf;
2487 struct ice_vsi *vsi;
2488
2489 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2490 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2491 goto error_param;
2492 }
2493
2494 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2495 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2496 goto error_param;
2497 }
2498
2499 vsi = pf->vsi[vf->lan_vsi_idx];
2500 if (!vsi) {
2501 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2502 goto error_param;
2503 }
2504
2505 ice_update_eth_stats(vsi);
2506
2507 stats = vsi->eth_stats;
2508
2509error_param:
2510
2511 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
2512 (u8 *)&stats, sizeof(stats));
2513}
2514
2515
2516
2517
2518
2519
2520
2521static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
2522{
2523 if ((!vqs->rx_queues && !vqs->tx_queues) ||
2524 vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
2525 vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
2526 return false;
2527
2528 return true;
2529}
2530
2531
2532
2533
2534
2535
2536static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
2537{
2538 struct ice_hw *hw = &vsi->back->hw;
2539 u32 pfq = vsi->txq_map[q_idx];
2540 u32 reg;
2541
2542 reg = rd32(hw, QINT_TQCTL(pfq));
2543
2544
2545
2546
2547
2548 if (!(reg & QINT_TQCTL_MSIX_INDX_M))
2549 return;
2550
2551 wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
2552}
2553
2554
2555
2556
2557
2558
2559static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
2560{
2561 struct ice_hw *hw = &vsi->back->hw;
2562 u32 pfq = vsi->rxq_map[q_idx];
2563 u32 reg;
2564
2565 reg = rd32(hw, QINT_RQCTL(pfq));
2566
2567
2568
2569
2570
2571 if (!(reg & QINT_RQCTL_MSIX_INDX_M))
2572 return;
2573
2574 wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
2575}

/**
 * ice_vc_ena_qs_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to enable all or specific queue(s)
 */
2584static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
2585{
2586 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2587 struct virtchnl_queue_select *vqs =
2588 (struct virtchnl_queue_select *)msg;
2589 struct ice_pf *pf = vf->pf;
2590 struct ice_vsi *vsi;
2591 unsigned long q_map;
2592 u16 vf_q_id;
2593
2594 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2595 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2596 goto error_param;
2597 }
2598
2599 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2600 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2601 goto error_param;
2602 }
2603
2604 if (!ice_vc_validate_vqs_bitmaps(vqs)) {
2605 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2606 goto error_param;
2607 }
2608
2609 vsi = pf->vsi[vf->lan_vsi_idx];
2610 if (!vsi) {
2611 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2612 goto error_param;
2613 }
2614
2615
2616
2617
2618
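	/* Enable only Rx rings, Tx rings were enabled by the FW when the
	 * Tx queue group list was configured and the context bits were
	 * programmed using ice_vsi_cfg_txqs
	 */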
2619 q_map = vqs->rx_queues;
2620 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
2621 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2622 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2623 goto error_param;
2624 }
2625
2626
2627 if (test_bit(vf_q_id, vf->rxq_ena))
2628 continue;
2629
2630 if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
2631 dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
2632 vf_q_id, vsi->vsi_num);
2633 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2634 goto error_param;
2635 }
2636
2637 ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
2638 set_bit(vf_q_id, vf->rxq_ena);
2639 }
2640
2641 vsi = pf->vsi[vf->lan_vsi_idx];
2642 q_map = vqs->tx_queues;
2643 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
2644 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2645 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2646 goto error_param;
2647 }
2648
2649
2650 if (test_bit(vf_q_id, vf->txq_ena))
2651 continue;
2652
2653 ice_vf_ena_txq_interrupt(vsi, vf_q_id);
2654 set_bit(vf_q_id, vf->txq_ena);
2655 }
2656
2657
2658 if (v_ret == VIRTCHNL_STATUS_SUCCESS)
2659 set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
2660
2661error_param:
2662
2663 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
2664 NULL, 0);
2665}
2666
2667
2668
2669
2670
2671
2672
2673
2674
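/**
 * ice_vc_dis_qs_msg - handle VIRTCHNL_OP_DISABLE_QUEUES
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Disable the Rx/Tx queues requested by the VF and send back the result.
 */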
2675static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
2676{
2677 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2678 struct virtchnl_queue_select *vqs =
2679 (struct virtchnl_queue_select *)msg;
2680 struct ice_pf *pf = vf->pf;
2681 struct ice_vsi *vsi;
2682 unsigned long q_map;
2683 u16 vf_q_id;
2684
2685 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
2686 !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
2687 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2688 goto error_param;
2689 }
2690
2691 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2692 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2693 goto error_param;
2694 }
2695
2696 if (!ice_vc_validate_vqs_bitmaps(vqs)) {
2697 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2698 goto error_param;
2699 }
2700
2701 vsi = pf->vsi[vf->lan_vsi_idx];
2702 if (!vsi) {
2703 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2704 goto error_param;
2705 }
2706
2707 if (vqs->tx_queues) {
2708 q_map = vqs->tx_queues;
2709
2710 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
2711 struct ice_ring *ring = vsi->tx_rings[vf_q_id];
2712 struct ice_txq_meta txq_meta = { 0 };
2713
2714 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2715 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2716 goto error_param;
2717 }
2718
2719
2720 if (!test_bit(vf_q_id, vf->txq_ena))
2721 continue;
2722
2723 ice_fill_txq_meta(vsi, ring, &txq_meta);
2724
2725 if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
2726 ring, &txq_meta)) {
2727 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
2728 vf_q_id, vsi->vsi_num);
2729 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2730 goto error_param;
2731 }
2732
2733
2734 clear_bit(vf_q_id, vf->txq_ena);
2735 }
2736 }
2737
2738 q_map = vqs->rx_queues;
2739
2740 if (q_map &&
2741 bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
2742 if (ice_vsi_stop_all_rx_rings(vsi)) {
2743 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
2744 vsi->vsi_num);
2745 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2746 goto error_param;
2747 }
2748
2749 bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
2750 } else if (q_map) {
2751 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
2752 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2753 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2754 goto error_param;
2755 }
2756
2757
2758 if (!test_bit(vf_q_id, vf->rxq_ena))
2759 continue;
2760
2761 if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
2762 true)) {
2763 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
2764 vf_q_id, vsi->vsi_num);
2765 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2766 goto error_param;
2767 }
2768
2769
2770 clear_bit(vf_q_id, vf->rxq_ena);
2771 }
2772 }
2773
2774
2775 if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
2776 clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
2777
2778error_param:
2779
2780 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
2781 NULL, 0);
2782}
2783
2784
2785
2786
2787
2788
2789
2790
2791
2792
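/**
 * ice_cfg_interrupt - map a VF vector to the Rx/Tx queues it requested
 * @vf: pointer to the VF info
 * @vsi: the VSI being configured
 * @vector_id: vector ID (in the VF's vector space)
 * @map: vector map received from the VF
 * @q_vector: PF queue vector backing this VF vector
 *
 * Returns VIRTCHNL_STATUS_SUCCESS, or VIRTCHNL_STATUS_ERR_PARAM if a queue ID
 * in the map is invalid.
 */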
2793static int
2794ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
2795 struct virtchnl_vector_map *map,
2796 struct ice_q_vector *q_vector)
2797{
2798 u16 vsi_q_id, vsi_q_id_idx;
2799 unsigned long qmap;
2800
2801 q_vector->num_ring_rx = 0;
2802 q_vector->num_ring_tx = 0;
2803
2804 qmap = map->rxq_map;
2805 for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
2806 vsi_q_id = vsi_q_id_idx;
2807
2808 if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
2809 return VIRTCHNL_STATUS_ERR_PARAM;
2810
2811 q_vector->num_ring_rx++;
2812 q_vector->rx.itr_idx = map->rxitr_idx;
2813 vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
2814 ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
2815 q_vector->rx.itr_idx);
2816 }
2817
2818 qmap = map->txq_map;
2819 for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
2820 vsi_q_id = vsi_q_id_idx;
2821
2822 if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
2823 return VIRTCHNL_STATUS_ERR_PARAM;
2824
2825 q_vector->num_ring_tx++;
2826 q_vector->tx.itr_idx = map->txitr_idx;
2827 vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
2828 ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
2829 q_vector->tx.itr_idx);
2830 }
2831
2832 return VIRTCHNL_STATUS_SUCCESS;
2833}
2834
2835
2836
2837
2838
2839
2840
2841
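/**
 * ice_vc_cfg_irq_map_msg - handle VIRTCHNL_OP_CONFIG_IRQ_MAP
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Map the VF's interrupt vectors to its queues and send back the result.
 */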
2842static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
2843{
2844 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2845 u16 num_q_vectors_mapped, vsi_id, vector_id;
2846 struct virtchnl_irq_map_info *irqmap_info;
2847 struct virtchnl_vector_map *map;
2848 struct ice_pf *pf = vf->pf;
2849 struct ice_vsi *vsi;
2850 int i;
2851
2852 irqmap_info = (struct virtchnl_irq_map_info *)msg;
2853 num_q_vectors_mapped = irqmap_info->num_vectors;
2854
2855
2856
2857
2858
2859 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
2860 pf->num_msix_per_vf < num_q_vectors_mapped ||
2861 !num_q_vectors_mapped) {
2862 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2863 goto error_param;
2864 }
2865
2866 vsi = pf->vsi[vf->lan_vsi_idx];
2867 if (!vsi) {
2868 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2869 goto error_param;
2870 }
2871
2872 for (i = 0; i < num_q_vectors_mapped; i++) {
2873 struct ice_q_vector *q_vector;
2874
2875 map = &irqmap_info->vecmap[i];
2876
2877 vector_id = map->vector_id;
2878 vsi_id = map->vsi_id;
2879
2880
2881
2882 if (vector_id >= pf->num_msix_per_vf ||
2883 !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
2884 (!vector_id && (map->rxq_map || map->txq_map))) {
2885 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2886 goto error_param;
2887 }
2888
2889
2890 if (!vector_id)
2891 continue;
2892
2893
2894
2895
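		/* vector_id is VF-relative and vector 0 is reserved for the
		 * VF's non-queue interrupts, so the matching entry in the
		 * PF's q_vectors array is offset by ICE_NONQ_VECS_VF.
		 */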
2896 q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
2897 if (!q_vector) {
2898 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2899 goto error_param;
2900 }
2901
2902
2903 v_ret = (enum virtchnl_status_code)
2904 ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
2905 if (v_ret)
2906 goto error_param;
2907 }
2908
2909error_param:
2910
2911 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
2912 NULL, 0);
2913}
2914
2915
2916
2917
2918
2919
2920
2921
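/**
 * ice_vc_cfg_qs_msg - handle VIRTCHNL_OP_CONFIG_VSI_QUEUES
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Validate and program the Rx/Tx queue contexts requested by the VF and send
 * back the result.
 */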
2922static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
2923{
2924 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2925 struct virtchnl_vsi_queue_config_info *qci =
2926 (struct virtchnl_vsi_queue_config_info *)msg;
2927 struct virtchnl_queue_pair_info *qpi;
2928 u16 num_rxq = 0, num_txq = 0;
2929 struct ice_pf *pf = vf->pf;
2930 struct ice_vsi *vsi;
2931 int i;
2932
2933 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2934 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2935 goto error_param;
2936 }
2937
2938 if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
2939 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2940 goto error_param;
2941 }
2942
2943 vsi = pf->vsi[vf->lan_vsi_idx];
2944 if (!vsi) {
2945 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2946 goto error_param;
2947 }
2948
2949 if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
2950 qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
2951 dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
2952 vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
2953 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2954 goto error_param;
2955 }
2956
2957 for (i = 0; i < qci->num_queue_pairs; i++) {
2958 qpi = &qci->qpair[i];
2959 if (qpi->txq.vsi_id != qci->vsi_id ||
2960 qpi->rxq.vsi_id != qci->vsi_id ||
2961 qpi->rxq.queue_id != qpi->txq.queue_id ||
2962 qpi->txq.headwb_enabled ||
2963 !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
2964 !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
2965 !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
2966 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2967 goto error_param;
2968 }
2969
2970 if (qpi->txq.ring_len > 0) {
2971 num_txq++;
2972 vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
2973 vsi->tx_rings[i]->count = qpi->txq.ring_len;
2974 }
2975
2976
2977 if (qpi->rxq.ring_len > 0) {
2978 num_rxq++;
2979 vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
2980 vsi->rx_rings[i]->count = qpi->rxq.ring_len;
2981
2982 if (qpi->rxq.databuffer_size != 0 &&
2983 (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
2984 qpi->rxq.databuffer_size < 1024)) {
2985 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2986 goto error_param;
2987 }
2988 vsi->rx_buf_len = qpi->rxq.databuffer_size;
2989 vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
2990 if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
2991 qpi->rxq.max_pkt_size < 64) {
2992 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2993 goto error_param;
2994 }
2995 }
2996
2997 vsi->max_frame = qpi->rxq.max_pkt_size;
2998 }
2999
3000
3001
3002
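	/* The VF may configure fewer queue pairs than were allocated to it,
	 * so record the counts it actually configured before programming the
	 * queues below.
	 */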
3003 vsi->num_txq = num_txq;
3004 vsi->num_rxq = num_rxq;
3005
3006 vsi->tc_cfg.tc_info[0].qcount_tx = num_txq;
3007 vsi->tc_cfg.tc_info[0].qcount_rx = num_rxq;
3008
3009 if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi))
3010 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
3011
3012error_param:
3013
3014 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
3015 NULL, 0);
3016}
3017
3018
3019
3020
3021
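/**
 * ice_is_vf_trusted - check if the VF has the privileged capability set
 * @vf: pointer to the VF info
 */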
3022static bool ice_is_vf_trusted(struct ice_vf *vf)
3023{
3024 return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
3025}
3026
3027
3028
3029
3030
3031
3032
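/**
 * ice_can_vf_change_mac - check if the VF may change its MAC filters
 * @vf: pointer to the VF info
 *
 * An untrusted VF is not allowed to override a MAC address that was set
 * administratively by the PF.
 */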
3033static bool ice_can_vf_change_mac(struct ice_vf *vf)
3034{
3035
3036
3037
3038
3039 if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
3040 return false;
3041
3042 return true;
3043}
3044
3045
3046
3047
3048
3049
3050
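/**
 * ice_vc_add_mac_addr - add a MAC filter on behalf of the VF
 * @vf: pointer to the VF info
 * @vsi: pointer to the VF's VSI
 * @mac_addr: MAC address to add
 */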
3051static int
3052ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
3053{
3054 struct device *dev = ice_pf_to_dev(vf->pf);
3055 enum ice_status status;
3056
3057
3058 if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3059 return 0;
3060
3061 if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
3062 dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
3063 return -EPERM;
3064 }
3065
3066 status = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
3067 if (status == ICE_ERR_ALREADY_EXISTS) {
3068 dev_err(dev, "MAC %pM already exists for VF %d\n", mac_addr,
3069 vf->vf_id);
3070 return -EEXIST;
3071 } else if (status) {
3072 dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %s\n",
3073 mac_addr, vf->vf_id, ice_stat_str(status));
3074 return -EIO;
3075 }
3076
3077
3078
3079
3080
3081 if (is_unicast_ether_addr(mac_addr))
3082 ether_addr_copy(vf->dflt_lan_addr.addr, mac_addr);
3083
3084 vf->num_mac++;
3085
3086 return 0;
3087}
3088
3089
3090
3091
3092
3093
3094
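/**
 * ice_vc_del_mac_addr - remove a MAC filter on behalf of the VF
 * @vf: pointer to the VF info
 * @vsi: pointer to the VF's VSI
 * @mac_addr: MAC address to delete
 */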
3095static int
3096ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
3097{
3098 struct device *dev = ice_pf_to_dev(vf->pf);
3099 enum ice_status status;
3100
3101 if (!ice_can_vf_change_mac(vf) &&
3102 ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3103 return 0;
3104
3105 status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
3106 if (status == ICE_ERR_DOES_NOT_EXIST) {
3107 dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
3108 vf->vf_id);
3109 return -ENOENT;
3110 } else if (status) {
3111 dev_err(dev, "Failed to delete MAC %pM for VF %d, error %s\n",
3112 mac_addr, vf->vf_id, ice_stat_str(status));
3113 return -EIO;
3114 }
3115
3116 if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3117 eth_zero_addr(vf->dflt_lan_addr.addr);
3118
3119 vf->num_mac--;
3120
3121 return 0;
3122}
3123
3124
3125
3126
3127
3128
3129
3130
3131
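/**
 * ice_vc_handle_mac_addr_msg - add or delete VF MAC filters
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @set: true to add filters, false to delete them
 *
 * Common handler for VIRTCHNL_OP_ADD_ETH_ADDR and VIRTCHNL_OP_DEL_ETH_ADDR.
 */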
3132static int
3133ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
3134{
3135 int (*ice_vc_cfg_mac)
3136 (struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr);
3137 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3138 struct virtchnl_ether_addr_list *al =
3139 (struct virtchnl_ether_addr_list *)msg;
3140 struct ice_pf *pf = vf->pf;
3141 enum virtchnl_ops vc_op;
3142 struct ice_vsi *vsi;
3143 int i;
3144
3145 if (set) {
3146 vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
3147 ice_vc_cfg_mac = ice_vc_add_mac_addr;
3148 } else {
3149 vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
3150 ice_vc_cfg_mac = ice_vc_del_mac_addr;
3151 }
3152
3153 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
3154 !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
3155 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3156 goto handle_mac_exit;
3157 }
3158
3159
3160
3161
3162
3163 if (set && !ice_is_vf_trusted(vf) &&
3164 (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
3165 dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
3166 vf->vf_id);
3167 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3168 goto handle_mac_exit;
3169 }
3170
3171 vsi = pf->vsi[vf->lan_vsi_idx];
3172 if (!vsi) {
3173 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3174 goto handle_mac_exit;
3175 }
3176
3177 for (i = 0; i < al->num_elements; i++) {
3178 u8 *mac_addr = al->list[i].addr;
3179 int result;
3180
3181 if (is_broadcast_ether_addr(mac_addr) ||
3182 is_zero_ether_addr(mac_addr))
3183 continue;
3184
3185 result = ice_vc_cfg_mac(vf, vsi, mac_addr);
3186 if (result == -EEXIST || result == -ENOENT) {
3187 continue;
3188 } else if (result) {
3189 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
3190 goto handle_mac_exit;
3191 }
3192 }
3193
3194handle_mac_exit:
3195
3196 return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
3197}
3198
3199
3200
3201
3202
3203
3204
3205
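/**
 * ice_vc_add_mac_addr_msg - handle VIRTCHNL_OP_ADD_ETH_ADDR
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 */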
3206static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
3207{
3208 return ice_vc_handle_mac_addr_msg(vf, msg, true);
3209}
3210
3211
3212
3213
3214
3215
3216
3217
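/**
 * ice_vc_del_mac_addr_msg - handle VIRTCHNL_OP_DEL_ETH_ADDR
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 */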
3218static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
3219{
3220 return ice_vc_handle_mac_addr_msg(vf, msg, false);
3221}
3222
3223
3224
3225
3226
3227
3228
3229
3230
3231
3232
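/**
 * ice_vc_request_qs_msg - handle VIRTCHNL_OP_REQUEST_QUEUES
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * The VF asks for a new number of queue pairs. If the request can be honored
 * the VF is reset so the new count takes effect; otherwise the reply tells
 * the VF how many queues it can actually get.
 */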
3233static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
3234{
3235 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3236 struct virtchnl_vf_res_request *vfres =
3237 (struct virtchnl_vf_res_request *)msg;
3238 u16 req_queues = vfres->num_queue_pairs;
3239 struct ice_pf *pf = vf->pf;
3240 u16 max_allowed_vf_queues;
3241 u16 tx_rx_queue_left;
3242 struct device *dev;
3243 u16 cur_queues;
3244
3245 dev = ice_pf_to_dev(pf);
3246 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3247 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3248 goto error_param;
3249 }
3250
3251 cur_queues = vf->num_vf_qs;
3252 tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
3253 ice_get_avail_rxq_count(pf));
3254 max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
3255 if (!req_queues) {
3256 dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
3257 vf->vf_id);
3258 } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
3259 dev_err(dev, "VF %d tried to request more than %d queues.\n",
3260 vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
3261 vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
3262 } else if (req_queues > cur_queues &&
3263 req_queues - cur_queues > tx_rx_queue_left) {
3264 dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
3265 vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
3266 vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
3267 ICE_MAX_RSS_QS_PER_VF);
3268 } else {
3269
3270 vf->num_req_qs = req_queues;
3271 ice_vc_reset_vf(vf);
3272 dev_info(dev, "VF %d granted request of %u queues.\n",
3273 vf->vf_id, req_queues);
3274 return 0;
3275 }
3276
3277error_param:
3278
3279 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
3280 v_ret, (u8 *)vfres, sizeof(*vfres));
3281}
3282
3283
3284
3285
3286
3287
3288
3289
3290
3291
3292
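/**
 * ice_set_vf_port_vlan - .ndo_set_vf_vlan handler, configure a port VLAN
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @vlan_id: VLAN ID to set (0 clears the port VLAN)
 * @qos: priority (0-7) for the port VLAN
 * @vlan_proto: VLAN protocol; only 802.1Q is supported
 *
 * Returns 0 on success, negative error code otherwise.
 */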
3293int
3294ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
3295 __be16 vlan_proto)
3296{
3297 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3298 struct device *dev;
3299 struct ice_vf *vf;
3300 u16 vlanprio;
3301 int ret;
3302
3303 dev = ice_pf_to_dev(pf);
3304 if (ice_validate_vf_id(pf, vf_id))
3305 return -EINVAL;
3306
3307 if (vlan_id >= VLAN_N_VID || qos > 7) {
3308 dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
3309 vf_id, vlan_id, qos);
3310 return -EINVAL;
3311 }
3312
3313 if (vlan_proto != htons(ETH_P_8021Q)) {
3314 dev_err(dev, "VF VLAN protocol is not supported\n");
3315 return -EPROTONOSUPPORT;
3316 }
3317
3318 vf = &pf->vf[vf_id];
3319 ret = ice_check_vf_ready_for_cfg(vf);
3320 if (ret)
3321 return ret;
3322
3323 vlanprio = vlan_id | (qos << VLAN_PRIO_SHIFT);
3324
3325 if (vf->port_vlan_info == vlanprio) {
3326
3327 dev_dbg(dev, "Duplicate pvid %d request\n", vlanprio);
3328 return 0;
3329 }
3330
3331 vf->port_vlan_info = vlanprio;
3332
3333 if (vf->port_vlan_info)
3334 dev_info(dev, "Setting VLAN %d, QoS 0x%x on VF %d\n",
3335 vlan_id, qos, vf_id);
3336 else
3337 dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);
3338
3339 ice_vc_reset_vf(vf);
3340
3341 return 0;
3342}
3343
3344
3345
3346
3347
3348
3349
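/**
 * ice_vf_vlan_offload_ena - check if the VF driver negotiated VLAN offload
 * @caps: VF driver negotiated capabilities
 */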
3350static bool ice_vf_vlan_offload_ena(u32 caps)
3351{
3352 return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
3353}
3354
3355
3356
3357
3358
3359
3360
3361
3362
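/**
 * ice_vc_process_vlan_msg - add or delete VF VLAN filters
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @add_v: true to add VLANs, false to delete them
 *
 * Common handler for VIRTCHNL_OP_ADD_VLAN and VIRTCHNL_OP_DEL_VLAN.
 */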
3363static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
3364{
3365 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3366 struct virtchnl_vlan_filter_list *vfl =
3367 (struct virtchnl_vlan_filter_list *)msg;
3368 struct ice_pf *pf = vf->pf;
3369 bool vlan_promisc = false;
3370 struct ice_vsi *vsi;
3371 struct device *dev;
3372 struct ice_hw *hw;
3373 int status = 0;
3374 u8 promisc_m;
3375 int i;
3376
3377 dev = ice_pf_to_dev(pf);
3378 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3379 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3380 goto error_param;
3381 }
3382
3383 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
3384 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3385 goto error_param;
3386 }
3387
3388 if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
3389 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3390 goto error_param;
3391 }
3392
3393 for (i = 0; i < vfl->num_elements; i++) {
3394 if (vfl->vlan_id[i] >= VLAN_N_VID) {
3395 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3396 dev_err(dev, "invalid VF VLAN id %d\n",
3397 vfl->vlan_id[i]);
3398 goto error_param;
3399 }
3400 }
3401
3402 hw = &pf->hw;
3403 vsi = pf->vsi[vf->lan_vsi_idx];
3404 if (!vsi) {
3405 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3406 goto error_param;
3407 }
3408
3409 if (add_v && !ice_is_vf_trusted(vf) &&
3410 vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
3411 dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
3412 vf->vf_id);
3413
3414
3415
3416 goto error_param;
3417 }
3418
3419 if (vsi->info.pvid) {
3420 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3421 goto error_param;
3422 }
3423
3424 if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
3425 test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
3426 test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags))
3427 vlan_promisc = true;
3428
3429 if (add_v) {
3430 for (i = 0; i < vfl->num_elements; i++) {
3431 u16 vid = vfl->vlan_id[i];
3432
3433 if (!ice_is_vf_trusted(vf) &&
3434 vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
3435 dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
3436 vf->vf_id);
3437
3438
3439
3440
3441 goto error_param;
3442 }
3443
3444
3445
3446
3447
3448 if (!vid)
3449 continue;
3450
3451 status = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
3452 if (status) {
3453 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3454 goto error_param;
3455 }
3456
3457
3458 if (!vlan_promisc && vid &&
3459 !ice_vsi_is_vlan_pruning_ena(vsi)) {
3460 status = ice_cfg_vlan_pruning(vsi, true, false);
3461 if (status) {
3462 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3463 dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
3464 vid, status);
3465 goto error_param;
3466 }
3467 } else if (vlan_promisc) {
3468
3469 promisc_m = ICE_PROMISC_VLAN_TX |
3470 ICE_PROMISC_VLAN_RX;
3471
3472 status = ice_set_vsi_promisc(hw, vsi->idx,
3473 promisc_m, vid);
3474 if (status) {
3475 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3476 dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
3477 vid, status);
3478 }
3479 }
3480 }
3481 } else {
3482
3483
3484
3485
3486
3487
3488
3489 int num_vf_vlan;
3490
3491 num_vf_vlan = vsi->num_vlan;
3492 for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
3493 u16 vid = vfl->vlan_id[i];
3494
3495
3496
3497
3498
3499 if (!vid)
3500 continue;
3501
3502
3503
3504
3505 status = ice_vsi_kill_vlan(vsi, vid);
3506 if (status) {
3507 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3508 goto error_param;
3509 }
3510
3511
3512 if (vsi->num_vlan == 1 &&
3513 ice_vsi_is_vlan_pruning_ena(vsi))
3514 ice_cfg_vlan_pruning(vsi, false, false);
3515
3516
3517 if (vlan_promisc) {
3518 promisc_m = ICE_PROMISC_VLAN_TX |
3519 ICE_PROMISC_VLAN_RX;
3520
3521 ice_clear_vsi_promisc(hw, vsi->idx,
3522 promisc_m, vid);
3523 }
3524 }
3525 }
3526
3527error_param:
3528
3529 if (add_v)
3530 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
3531 NULL, 0);
3532 else
3533 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
3534 NULL, 0);
3535}
3536
3537
3538
3539
3540
3541
3542
3543
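/**
 * ice_vc_add_vlan_msg - handle VIRTCHNL_OP_ADD_VLAN
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 */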
3544static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
3545{
3546 return ice_vc_process_vlan_msg(vf, msg, true);
3547}
3548
3549
3550
3551
3552
3553
3554
3555
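/**
 * ice_vc_remove_vlan_msg - handle VIRTCHNL_OP_DEL_VLAN
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 */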
3556static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
3557{
3558 return ice_vc_process_vlan_msg(vf, msg, false);
3559}
3560
3561
3562
3563
3564
3565
3566
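/**
 * ice_vc_ena_vlan_stripping - handle VIRTCHNL_OP_ENABLE_VLAN_STRIPPING
 * @vf: pointer to the VF info
 *
 * Enable VLAN header stripping on the VF's VSI and send back the result.
 */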
3567static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
3568{
3569 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3570 struct ice_pf *pf = vf->pf;
3571 struct ice_vsi *vsi;
3572
3573 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3574 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3575 goto error_param;
3576 }
3577
3578 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
3579 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3580 goto error_param;
3581 }
3582
3583 vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

3584 if (ice_vsi_manage_vlan_stripping(vsi, true))
3585 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3586
3587error_param:
3588 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
3589 v_ret, NULL, 0);
3590}
3591
3592
3593
3594
3595
3596
3597
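/**
 * ice_vc_dis_vlan_stripping - handle VIRTCHNL_OP_DISABLE_VLAN_STRIPPING
 * @vf: pointer to the VF info
 *
 * Disable VLAN header stripping on the VF's VSI and send back the result.
 */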
3598static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
3599{
3600 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3601 struct ice_pf *pf = vf->pf;
3602 struct ice_vsi *vsi;
3603
3604 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3605 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3606 goto error_param;
3607 }
3608
3609 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
3610 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3611 goto error_param;
3612 }
3613
3614 vsi = pf->vsi[vf->lan_vsi_idx];
3615 if (!vsi) {
3616 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3617 goto error_param;
3618 }
3619
3620 if (ice_vsi_manage_vlan_stripping(vsi, false))
3621 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3622
3623error_param:
3624 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
3625 v_ret, NULL, 0);
3626}
3627
3628
3629
3630
3631
3632
3633
3634
3635
3636
3637
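/**
 * ice_vf_init_vlan_stripping - set the initial VLAN stripping state for a VF
 * @vf: pointer to the VF info
 *
 * Enable or disable VLAN stripping based on the capabilities the VF driver
 * negotiated; do nothing when a port VLAN is configured on the VSI.
 */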
3638static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
3639{
3640 struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
3641
3642 if (!vsi)
3643 return -EINVAL;
3644
3645
3646 if (vsi->info.pvid)
3647 return 0;
3648
3649 if (ice_vf_vlan_offload_ena(vf->driver_caps))
3650 return ice_vsi_manage_vlan_stripping(vsi, true);
3651 else
3652 return ice_vsi_manage_vlan_stripping(vsi, false);
3653}
3654
3655
3656
3657
3658
3659
3660
3661
3662
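/**
 * ice_vc_process_vf_msg - dispatch a mailbox message received from a VF
 * @pf: pointer to the PF structure
 * @event: admin receive queue event containing the VF message
 *
 * Validate the message and call the handler for the requested VIRTCHNL
 * opcode.
 */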
3663void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
3664{
3665 u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
3666 s16 vf_id = le16_to_cpu(event->desc.retval);
3667 u16 msglen = event->msg_len;
3668 u8 *msg = event->msg_buf;
3669 struct ice_vf *vf = NULL;
3670 struct device *dev;
3671 int err = 0;
3672
3673 dev = ice_pf_to_dev(pf);
3674 if (ice_validate_vf_id(pf, vf_id)) {
3675 err = -EINVAL;
3676 goto error_handler;
3677 }
3678
3679 vf = &pf->vf[vf_id];
3680
3681
3682 if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
3683 err = -EPERM;
3684 goto error_handler;
3685 }
3686
3687
3688 err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
3689 if (err) {
3690 if (err == VIRTCHNL_STATUS_ERR_PARAM)
3691 err = -EPERM;
3692 else
3693 err = -EINVAL;
3694 }
3695
3696error_handler:
3697 if (err) {
3698 if (vf)
3699 ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM, NULL, 0);
3700 dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
3701 vf_id, v_opcode, msglen, err);
3702 return;
3703 }
3704
3705 switch (v_opcode) {
3706 case VIRTCHNL_OP_VERSION:
3707 err = ice_vc_get_ver_msg(vf, msg);
3708 break;
3709 case VIRTCHNL_OP_GET_VF_RESOURCES:
3710 err = ice_vc_get_vf_res_msg(vf, msg);
3711 if (ice_vf_init_vlan_stripping(vf))
3712 dev_err(dev, "Failed to initialize VLAN stripping for VF %d\n",
3713 vf->vf_id);
3714 ice_vc_notify_vf_link_state(vf);
3715 break;
3716 case VIRTCHNL_OP_RESET_VF:
3717 ice_vc_reset_vf_msg(vf);
3718 break;
3719 case VIRTCHNL_OP_ADD_ETH_ADDR:
3720 err = ice_vc_add_mac_addr_msg(vf, msg);
3721 break;
3722 case VIRTCHNL_OP_DEL_ETH_ADDR:
3723 err = ice_vc_del_mac_addr_msg(vf, msg);
3724 break;
3725 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
3726 err = ice_vc_cfg_qs_msg(vf, msg);
3727 break;
3728 case VIRTCHNL_OP_ENABLE_QUEUES:
3729 err = ice_vc_ena_qs_msg(vf, msg);
3730 ice_vc_notify_vf_link_state(vf);
3731 break;
3732 case VIRTCHNL_OP_DISABLE_QUEUES:
3733 err = ice_vc_dis_qs_msg(vf, msg);
3734 break;
3735 case VIRTCHNL_OP_REQUEST_QUEUES:
3736 err = ice_vc_request_qs_msg(vf, msg);
3737 break;
3738 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
3739 err = ice_vc_cfg_irq_map_msg(vf, msg);
3740 break;
3741 case VIRTCHNL_OP_CONFIG_RSS_KEY:
3742 err = ice_vc_config_rss_key(vf, msg);
3743 break;
3744 case VIRTCHNL_OP_CONFIG_RSS_LUT:
3745 err = ice_vc_config_rss_lut(vf, msg);
3746 break;
3747 case VIRTCHNL_OP_GET_STATS:
3748 err = ice_vc_get_stats_msg(vf, msg);
3749 break;
3750 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
3751 err = ice_vc_cfg_promiscuous_mode_msg(vf, msg);
3752 break;
3753 case VIRTCHNL_OP_ADD_VLAN:
3754 err = ice_vc_add_vlan_msg(vf, msg);
3755 break;
3756 case VIRTCHNL_OP_DEL_VLAN:
3757 err = ice_vc_remove_vlan_msg(vf, msg);
3758 break;
3759 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
3760 err = ice_vc_ena_vlan_stripping(vf);
3761 break;
3762 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
3763 err = ice_vc_dis_vlan_stripping(vf);
3764 break;
3765 case VIRTCHNL_OP_UNKNOWN:
3766 default:
3767 dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
3768 vf_id);
3769 err = ice_vc_send_msg_to_vf(vf, v_opcode,
3770 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
3771 NULL, 0);
3772 break;
3773 }
3774 if (err) {
3775
3776
3777
3778 dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
3779 vf_id, v_opcode, err);
3780 }
3781}
3782
3783
3784
3785
3786
3787
3788
3789
3790
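/**
 * ice_get_vf_cfg - .ndo_get_vf_config handler
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ivi: VF configuration structure to fill in
 *
 * Returns 0 on success, negative error code otherwise.
 */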
3791int
3792ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
3793{
3794 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3795 struct ice_vf *vf;
3796
3797 if (ice_validate_vf_id(pf, vf_id))
3798 return -EINVAL;
3799
3800 vf = &pf->vf[vf_id];
3801
3802 if (ice_check_vf_init(pf, vf))
3803 return -EBUSY;
3804
3805 ivi->vf = vf_id;
3806 ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr);
3807
3808
3809 ivi->vlan = vf->port_vlan_info & VLAN_VID_MASK;
3810 ivi->qos = (vf->port_vlan_info & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
3811
3812 ivi->trusted = vf->trusted;
3813 ivi->spoofchk = vf->spoofchk;
3814 if (!vf->link_forced)
3815 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
3816 else if (vf->link_up)
3817 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
3818 else
3819 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
3820 ivi->max_tx_rate = vf->tx_rate;
3821 ivi->min_tx_rate = 0;
3822 return 0;
3823}
3824
3825
3826
3827
3828
3829
3830
3831
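/**
 * ice_unicast_mac_exists - check if a unicast MAC filter already exists
 * @pf: pointer to the PF structure
 * @umac: unicast MAC address to look up in the switch MAC filter rules
 */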
3832static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac)
3833{
3834 struct ice_sw_recipe *mac_recipe_list =
3835 &pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC];
3836 struct ice_fltr_mgmt_list_entry *list_itr;
3837 struct list_head *rule_head;
3838 struct mutex *rule_lock;
3839
3840 rule_head = &mac_recipe_list->filt_rules;
3841 rule_lock = &mac_recipe_list->filt_rule_lock;
3842
3843 mutex_lock(rule_lock);
3844 list_for_each_entry(list_itr, rule_head, list_entry) {
3845 u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3846
3847 if (ether_addr_equal(existing_mac, umac)) {
3848 mutex_unlock(rule_lock);
3849 return true;
3850 }
3851 }
3852
3853 mutex_unlock(rule_lock);
3854
3855 return false;
3856}
3857
3858
3859
3860
3861
3862
3863
3864
3865
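/**
 * ice_set_vf_mac - .ndo_set_vf_mac handler, program a VF MAC address
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @mac: MAC address to set; an all-zeros address clears the administratively
 *       set MAC
 *
 * Returns 0 on success, negative error code otherwise.
 */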
3866int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
3867{
3868 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3869 struct ice_vf *vf;
3870 int ret;
3871
3872 if (ice_validate_vf_id(pf, vf_id))
3873 return -EINVAL;
3874
3875 if (is_multicast_ether_addr(mac)) {
3876 netdev_err(netdev, "%pM not a valid unicast address\n", mac);
3877 return -EINVAL;
3878 }
3879
3880 vf = &pf->vf[vf_id];
3881
3882 if (ether_addr_equal(vf->dflt_lan_addr.addr, mac))
3883 return 0;
3884
3885 ret = ice_check_vf_ready_for_cfg(vf);
3886 if (ret)
3887 return ret;
3888
3889 if (ice_unicast_mac_exists(pf, mac)) {
3890 netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n",
3891 mac, vf_id, mac);
3892 return -EINVAL;
3893 }
3894
3895
3896
3897
3898 ether_addr_copy(vf->dflt_lan_addr.addr, mac);
3899 if (is_zero_ether_addr(mac)) {
3900
3901 vf->pf_set_mac = false;
3902 netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
3903 vf->vf_id);
3904 } else {
3905
3906 vf->pf_set_mac = true;
3907 netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
3908 mac, vf_id);
3909 }
3910
3911 ice_vc_reset_vf(vf);
3912 return 0;
3913}
3914
3915
3916
3917
3918
3919
3920
3921
3922
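/**
 * ice_set_vf_trust - .ndo_set_vf_trust handler
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @trusted: new trusted setting
 *
 * Returns 0 on success, negative error code otherwise.
 */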
3923int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
3924{
3925 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3926 struct ice_vf *vf;
3927 int ret;
3928
3929 if (ice_validate_vf_id(pf, vf_id))
3930 return -EINVAL;
3931
3932 vf = &pf->vf[vf_id];
3933 ret = ice_check_vf_ready_for_cfg(vf);
3934 if (ret)
3935 return ret;
3936
3937
3938 if (trusted == vf->trusted)
3939 return 0;
3940
3941 vf->trusted = trusted;
3942 ice_vc_reset_vf(vf);
3943 dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
3944 vf_id, trusted ? "" : "un");
3945
3946 return 0;
3947}
3948
3949
3950
3951
3952
3953
3954
3955
3956
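/**
 * ice_set_vf_link_state - .ndo_set_vf_link_state handler
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link_state: requested link state (auto, enabled or disabled)
 *
 * Returns 0 on success, negative error code otherwise.
 */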
3957int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
3958{
3959 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3960 struct ice_vf *vf;
3961 int ret;
3962
3963 if (ice_validate_vf_id(pf, vf_id))
3964 return -EINVAL;
3965
3966 vf = &pf->vf[vf_id];
3967 ret = ice_check_vf_ready_for_cfg(vf);
3968 if (ret)
3969 return ret;
3970
3971 switch (link_state) {
3972 case IFLA_VF_LINK_STATE_AUTO:
3973 vf->link_forced = false;
3974 break;
3975 case IFLA_VF_LINK_STATE_ENABLE:
3976 vf->link_forced = true;
3977 vf->link_up = true;
3978 break;
3979 case IFLA_VF_LINK_STATE_DISABLE:
3980 vf->link_forced = true;
3981 vf->link_up = false;
3982 break;
3983 default:
3984 return -EINVAL;
3985 }
3986
3987 ice_vc_notify_vf_link_state(vf);
3988
3989 return 0;
3990}
3991
3992
3993
3994
3995
3996
3997
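/**
 * ice_get_vf_stats - .ndo_get_vf_stats handler
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @vf_stats: structure to fill with the VF's VSI statistics
 *
 * Returns 0 on success, negative error code otherwise.
 */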
3998int ice_get_vf_stats(struct net_device *netdev, int vf_id,
3999 struct ifla_vf_stats *vf_stats)
4000{
4001 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4002 struct ice_eth_stats *stats;
4003 struct ice_vsi *vsi;
4004 struct ice_vf *vf;
4005 int ret;
4006
4007 if (ice_validate_vf_id(pf, vf_id))
4008 return -EINVAL;
4009
4010 vf = &pf->vf[vf_id];
4011 ret = ice_check_vf_ready_for_cfg(vf);
4012 if (ret)
4013 return ret;
4014
4015 vsi = pf->vsi[vf->lan_vsi_idx];
4016 if (!vsi)
4017 return -EINVAL;
4018
4019 ice_update_eth_stats(vsi);
4020 stats = &vsi->eth_stats;
4021
4022 memset(vf_stats, 0, sizeof(*vf_stats));
4023
4024 vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
4025 stats->rx_multicast;
4026 vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
4027 stats->tx_multicast;
4028 vf_stats->rx_bytes = stats->rx_bytes;
4029 vf_stats->tx_bytes = stats->tx_bytes;
4030 vf_stats->broadcast = stats->rx_broadcast;
4031 vf_stats->multicast = stats->rx_multicast;
4032 vf_stats->rx_dropped = stats->rx_discards;
4033 vf_stats->tx_dropped = stats->tx_discards;
4034
4035 return 0;
4036}
4037
4038
4039
4040
4041
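/**
 * ice_print_vf_rx_mdd_event - print a VF's Rx MDD event count
 * @vf: pointer to the VF info
 */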
4042void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
4043{
4044 struct ice_pf *pf = vf->pf;
4045 struct device *dev;
4046
4047 dev = ice_pf_to_dev(pf);
4048
4049 dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
4050 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
4051 vf->dflt_lan_addr.addr,
4052 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
4053 ? "on" : "off");
4054}
4055
4056
4057
4058
4059
4060
4061
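/**
 * ice_print_vfs_mdd_events - print pending VF MDD events
 * @pf: pointer to the PF structure
 *
 * Log Rx and Tx MDD event counts for VFs whose counters changed since the
 * last print; output is rate limited to once per second.
 */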
4062void ice_print_vfs_mdd_events(struct ice_pf *pf)
4063{
4064 struct device *dev = ice_pf_to_dev(pf);
4065 struct ice_hw *hw = &pf->hw;
4066 int i;
4067
4068
4069 if (!test_and_clear_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state))
4070 return;
4071
4072
4073 if (time_is_after_jiffies(pf->last_printed_mdd_jiffies + HZ * 1))
4074 return;
4075
4076 pf->last_printed_mdd_jiffies = jiffies;
4077
4078 ice_for_each_vf(pf, i) {
4079 struct ice_vf *vf = &pf->vf[i];
4080
4081
4082 if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
4083 vf->mdd_rx_events.last_printed =
4084 vf->mdd_rx_events.count;
4085 ice_print_vf_rx_mdd_event(vf);
4086 }
4087
4088
4089 if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
4090 vf->mdd_tx_events.last_printed =
4091 vf->mdd_tx_events.count;
4092
4093 dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
4094 vf->mdd_tx_events.count, hw->pf_id, i,
4095 vf->dflt_lan_addr.addr);
4096 }
4097 }
4098}
4099