// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
/**
 * ice_validate_vf_id - helper to check if VF ID is valid
 * @pf: pointer to the PF structure
 * @vf_id: the ID of the VF to check
 */
static int ice_validate_vf_id(struct ice_pf *pf, u16 vf_id)
{
	/* vf_id range is only valid for 0-255, and should always be unsigned */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %u\n", vf_id);
		return -EINVAL;
	}
	return 0;
}
/**
 * ice_check_vf_init - helper to check if VF init complete
 * @pf: pointer to the PF structure
 * @vf: the pointer to the VF to check
 */
static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
{
	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
		dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
			vf->vf_id);
		return -EBUSY;
	}
	return 0;
}
/**
 * ice_err_to_virt_err - translate a driver error code to a virtchnl status
 * @ice_err: error return code to translate
 */
43static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
44{
45 switch (ice_err) {
46 case ICE_SUCCESS:
47 return VIRTCHNL_STATUS_SUCCESS;
48 case ICE_ERR_BAD_PTR:
49 case ICE_ERR_INVAL_SIZE:
50 case ICE_ERR_DEVICE_NOT_SUPPORTED:
51 case ICE_ERR_PARAM:
52 case ICE_ERR_CFG:
53 return VIRTCHNL_STATUS_ERR_PARAM;
54 case ICE_ERR_NO_MEMORY:
55 return VIRTCHNL_STATUS_ERR_NO_MEMORY;
56 case ICE_ERR_NOT_READY:
57 case ICE_ERR_RESET_FAILED:
58 case ICE_ERR_FW_API_VER:
59 case ICE_ERR_AQ_ERROR:
60 case ICE_ERR_AQ_TIMEOUT:
61 case ICE_ERR_AQ_FULL:
62 case ICE_ERR_AQ_NO_WORK:
63 case ICE_ERR_AQ_EMPTY:
64 return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
65 default:
66 return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
67 }
68}
/**
 * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 */
78static void
79ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
80 enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
81{
82 struct ice_hw *hw = &pf->hw;
83 unsigned int i;
84
85 ice_for_each_vf(pf, i) {
86 struct ice_vf *vf = &pf->vf[i];
87
		/* Not all VFs are enabled so skip the ones that are not */
89 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
90 !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
91 continue;
92
		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
96 ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
97 msglen, NULL);
98 }
99}
/**
 * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
 * @vf: pointer to the VF structure
 * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
 * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
 * @link_up: whether or not the link is up
 */
108static void
109ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
110 int ice_link_speed, bool link_up)
111{
112 if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
113 pfe->event_data.link_event_adv.link_status = link_up;
114
115 pfe->event_data.link_event_adv.link_speed =
116 ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
117 } else {
118 pfe->event_data.link_event.link_status = link_up;
119
120 pfe->event_data.link_event.link_speed =
121 (enum virtchnl_link_speed)
122 ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
123 }
124}
/**
 * ice_vf_has_no_qs_ena - check if the VF has any Rx/Tx queues enabled
 * @vf: the VF to check
 *
 * Returns true if the VF has no Rx and no Tx queues enabled and returns false
 * otherwise
 */
static bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
{
	return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
		!bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
}
/**
 * ice_is_vf_link_up - check if the VF's link is up
 * @vf: VF to check if link is up
 */
143static bool ice_is_vf_link_up(struct ice_vf *vf)
144{
145 struct ice_pf *pf = vf->pf;
146
147 if (ice_check_vf_init(pf, vf))
148 return false;
149
150 if (ice_vf_has_no_qs_ena(vf))
151 return false;
152 else if (vf->link_forced)
153 return vf->link_up;
154 else
155 return pf->hw.port_info->phy.link_info.link_info &
156 ICE_AQ_LINK_UP;
157}
/**
 * ice_vc_notify_vf_link_state - Inform a VF of link status
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 */
165static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
166{
167 struct virtchnl_pf_event pfe = { 0 };
168 struct ice_hw *hw = &vf->pf->hw;
169
170 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
171 pfe.severity = PF_EVENT_SEVERITY_INFO;
172
173 if (ice_is_vf_link_up(vf))
174 ice_set_pfe_link(vf, &pfe,
175 hw->port_info->phy.link_info.link_speed, true);
176 else
177 ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);
178
179 ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
180 VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
181 sizeof(pfe), NULL);
182}
/**
 * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
 * @vf: VF to remove access to VSI for
 */
static void ice_vf_invalidate_vsi(struct ice_vf *vf)
{
	vf->lan_vsi_idx = ICE_NO_VSI;
	vf->lan_vsi_num = ICE_NO_VSI;
}
/**
 * ice_vf_vsi_release - release the VF's VSI and invalidate its handles
 * @vf: VF whose VSI is being released
 */
static void ice_vf_vsi_release(struct ice_vf *vf)
{
	ice_vsi_release(vf->pf->vsi[vf->lan_vsi_idx]);
	ice_vf_invalidate_vsi(vf);
}
/**
 * ice_free_vf_res - Free a VF's resources
 * @vf: pointer to the VF info
 */
208static void ice_free_vf_res(struct ice_vf *vf)
209{
210 struct ice_pf *pf = vf->pf;
211 int i, last_vector_idx;

	/* First, disable VF's configuration API to prevent OS from
	 * accessing the VF's VSI after it's freed or invalidated.
	 */
216 clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
217
218
219 if (vf->lan_vsi_idx != ICE_NO_VSI) {
220 ice_vf_vsi_release(vf);
221 vf->num_mac = 0;
222 }
223
224 last_vector_idx = vf->first_vector_idx + pf->num_msix_per_vf - 1;
225
226
227 memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
228 memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
229
230
231 for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
232 wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
233 ice_flush(&pf->hw);
234 }
235
236 clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
237 clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
238}
/**
 * ice_dis_vf_mappings - disable the VF's MSI-X and queue register mappings
 * @vf: pointer to the VF structure
 */
244static void ice_dis_vf_mappings(struct ice_vf *vf)
245{
246 struct ice_pf *pf = vf->pf;
247 struct ice_vsi *vsi;
248 struct device *dev;
249 int first, last, v;
250 struct ice_hw *hw;
251
252 hw = &pf->hw;
253 vsi = pf->vsi[vf->lan_vsi_idx];
254
255 dev = ice_pf_to_dev(pf);
256 wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
257 wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
258
259 first = vf->first_vector_idx;
260 last = first + pf->num_msix_per_vf - 1;
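	/* Hand each of the VF's former MSI-X vectors back to the PF in
	 * GLINT_VECT2FUNC so hardware no longer routes them to the VF.
	 */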
261 for (v = first; v <= last; v++) {
262 u32 reg;
263
264 reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
265 GLINT_VECT2FUNC_IS_PF_M) |
266 ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
267 GLINT_VECT2FUNC_PF_NUM_M));
268 wr32(hw, GLINT_VECT2FUNC(v), reg);
269 }
270
271 if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
272 wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
273 else
274 dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
275
276 if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
277 wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
278 else
279 dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
280}
/**
 * ice_sriov_free_msix_res - Reset/free any used MSI-X resources
 * @pf: pointer to the PF structure
 *
 * The SR-IOV vectors are carved out past the irq_tracker entries, so there is
 * nothing to return to the tracker; just clear the cached SR-IOV base vector.
 *
 * Returns 0 on success, and -EINVAL on error.
 */
291static int ice_sriov_free_msix_res(struct ice_pf *pf)
292{
293 struct ice_res_tracker *res;
294
295 if (!pf)
296 return -EINVAL;
297
298 res = pf->irq_tracker;
299 if (!res)
300 return -EINVAL;
301
302
303 WARN_ON(pf->sriov_base_vector < res->num_entries);
304
305 pf->sriov_base_vector = 0;
306
307 return 0;
308}
/**
 * ice_set_vf_state_qs_dis - Set VF queues state to disabled
 * @vf: pointer to the VF structure
 */
314void ice_set_vf_state_qs_dis(struct ice_vf *vf)
315{
316
317 bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
318 bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
319 clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
320}
/**
 * ice_dis_vf_qs - Disable all of the VF's queues
 * @vf: pointer to the VF structure
 */
326static void ice_dis_vf_qs(struct ice_vf *vf)
327{
328 struct ice_pf *pf = vf->pf;
329 struct ice_vsi *vsi;
330
331 vsi = pf->vsi[vf->lan_vsi_idx];
332
333 ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
334 ice_vsi_stop_all_rx_rings(vsi);
335 ice_set_vf_state_qs_dis(vf);
336}
/**
 * ice_free_vfs - Free all VFs
 * @pf: pointer to the PF structure
 */
342void ice_free_vfs(struct ice_pf *pf)
343{
344 struct device *dev = ice_pf_to_dev(pf);
345 struct ice_hw *hw = &pf->hw;
346 unsigned int tmp, i;
347
348 if (!pf->vf)
349 return;
350
351 while (test_and_set_bit(__ICE_VF_DIS, pf->state))
352 usleep_range(1000, 2000);

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
358 if (!pci_vfs_assigned(pf->pdev))
359 pci_disable_sriov(pf->pdev);
360 else
361 dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
362
363
364 ice_for_each_vf(pf, i)
365 if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
366 ice_dis_vf_qs(&pf->vf[i]);
367
368 tmp = pf->num_alloc_vfs;
369 pf->num_qps_per_vf = 0;
370 pf->num_alloc_vfs = 0;
371 for (i = 0; i < tmp; i++) {
372 if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
373
374 ice_dis_vf_mappings(&pf->vf[i]);
375 set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states);
376 ice_free_vf_res(&pf->vf[i]);
377 }
378 }
379
380 if (ice_sriov_free_msix_res(pf))
381 dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
382
383 devm_kfree(dev, pf->vf);
384 pf->vf = NULL;

	/* This check is for when the driver is unloaded while VFs are
	 * assigned. Setting the number of VFs to 0 through sysfs is caught
	 * before this function ever gets called.
	 */
390 if (!pci_vfs_assigned(pf->pdev)) {
391 unsigned int vf_id;
392
393
394
395
396 for (vf_id = 0; vf_id < tmp; vf_id++) {
397 u32 reg_idx, bit_idx;
398
399 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
400 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
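			/* Writing 1 clears the VFLR status bit; acknowledge it
			 * here so the VF slot works correctly if SR-IOV is
			 * re-enabled later.
			 */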
401 wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
402 }
403 }
404 clear_bit(__ICE_VF_DIS, pf->state);
405 clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
406}
/**
 * ice_trigger_vf_reset - Reset a VF on HW
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 * @is_pfr: true if the reset is happening as part of a PF reset (PFR)
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 */
418static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
419{
420 struct ice_pf *pf = vf->pf;
421 u32 reg, reg_idx, bit_idx;
422 unsigned int vf_abs_id, i;
423 struct device *dev;
424 struct ice_hw *hw;
425
426 dev = ice_pf_to_dev(pf);
427 hw = &pf->hw;
428 vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
429
430
431 clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
432
433
434
435
436 clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

	/* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it
	 * in the case of VFR. If this is done for PFR, it can mess up VF
	 * resets because the VF driver may already have started cleanup
	 * by the time we get here.
	 */
443 if (!is_pfr)
444 wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
449 if (!is_vflr) {
450
451 reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
452 reg |= VPGEN_VFRTRIG_VFSWR_M;
453 wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
454 }
455
456 reg_idx = (vf_abs_id) / 32;
457 bit_idx = (vf_abs_id) % 32;
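	/* clear the VFLR bit for this VF in GLGEN_VFLRSTAT (write-1-to-clear) */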
458 wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
459 ice_flush(hw);
460
461 wr32(hw, PF_PCI_CIAA,
462 VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
463 for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
464 reg = rd32(hw, PF_PCI_CIAD);
465
466 if ((reg & VF_TRANS_PENDING_M) == 0)
467 break;
468
469 dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
470 udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
471 }
472}
/**
 * ice_vsi_manage_pvid - Enable or disable port VLAN for VSI
 * @vsi: the VSI to update
 * @pvid_info: VLAN ID and QoS used to set the PVID VSI context field
 * @enable: true for enable PVID, false for disable
 *
 * Returns 0 on success and negative on failure
 */
480static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable)
481{
482 struct ice_hw *hw = &vsi->back->hw;
483 struct ice_aqc_vsi_props *info;
484 struct ice_vsi_ctx *ctxt;
485 enum ice_status status;
486 int ret = 0;
487
488 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
489 if (!ctxt)
490 return -ENOMEM;
491
492 ctxt->info = vsi->info;
493 info = &ctxt->info;
494 if (enable) {
495 info->vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
496 ICE_AQ_VSI_PVLAN_INSERT_PVID |
497 ICE_AQ_VSI_VLAN_EMOD_STR;
498 info->sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
499 } else {
500 info->vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING |
501 ICE_AQ_VSI_VLAN_MODE_ALL;
502 info->sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
503 }
504
505 info->pvid = cpu_to_le16(pvid_info);
506 info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
507 ICE_AQ_VSI_PROP_SW_VALID);
508
509 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
510 if (status) {
511 dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %s aq_err %s\n",
512 ice_stat_str(status),
513 ice_aq_str(hw->adminq.sq_last_status));
514 ret = -EIO;
515 goto out;
516 }
517
518 vsi->info.vlan_flags = info->vlan_flags;
519 vsi->info.sw_flags2 = info->sw_flags2;
520 vsi->info.pvid = info->pvid;
521out:
522 kfree(ctxt);
523 return ret;
524}
/**
 * ice_vf_get_port_info - Get the VF's port info structure
 * @vf: VF used to get the port info structure for
 */
static struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
{
	return vf->pf->hw.port_info;
}
/**
 * ice_vf_vsi_setup - Set up a VF VSI
 * @vf: VF to setup VSI for
 *
 * Returns pointer to the successfully allocated VSI struct on success,
 * otherwise returns NULL on failure.
 */
542static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
543{
544 struct ice_port_info *pi = ice_vf_get_port_info(vf);
545 struct ice_pf *pf = vf->pf;
546 struct ice_vsi *vsi;
547
548 vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id);
549
550 if (!vsi) {
551 dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
552 ice_vf_invalidate_vsi(vf);
553 return NULL;
554 }
555
556 vf->lan_vsi_idx = vsi->idx;
557 vf->lan_vsi_num = vsi->vsi_num;
558
559 return vsi;
560}
/**
 * ice_calc_vf_first_vector_idx - Calculate MSI-X vector index in the PF space
 * @pf: pointer to PF structure
 * @vf: pointer to VF that the first MSI-X vector index is being calculated for
 *
 * This returns the first MSI-X vector index in PF space that is used by this
 * VF. The PF's own vectors come first, followed by the per-VF blocks carved
 * out starting at pf->sriov_base_vector, one block of pf->num_msix_per_vf
 * vectors per VF.
 */
static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
{
	return pf->sriov_base_vector + vf->vf_id * pf->num_msix_per_vf;
}
/**
 * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
 * @vf: VF to add VLAN filters for
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * always re-adds either a VLAN 0 or port VLAN based filter after reset.
 */
586static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf)
587{
588 struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
589 struct device *dev = ice_pf_to_dev(vf->pf);
590 u16 vlan_id = 0;
591 int err;
592
593 if (vf->port_vlan_info) {
594 err = ice_vsi_manage_pvid(vsi, vf->port_vlan_info, true);
595 if (err) {
596 dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
597 vf->vf_id, err);
598 return err;
599 }
600
601 vlan_id = vf->port_vlan_info & VLAN_VID_MASK;
602 }

	/* vlan_id will either be 0 or the port VLAN number */
605 err = ice_vsi_add_vlan(vsi, vlan_id, ICE_FWD_TO_VSI);
606 if (err) {
607 dev_err(dev, "failed to add %s VLAN %u filter for VF %u, error %d\n",
608 vf->port_vlan_info ? "port" : "", vlan_id, vf->vf_id,
609 err);
610 return err;
611 }
612
613 return 0;
614}
/**
 * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's default MAC filters
 * @vf: VF to add MAC filters for
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * is responsible for re-adding the broadcast filter and the VF's default MAC.
 */
623static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
624{
625 struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
626 struct device *dev = ice_pf_to_dev(vf->pf);
627 enum ice_status status;
628 u8 broadcast[ETH_ALEN];
629
630 eth_broadcast_addr(broadcast);
631 status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
632 if (status) {
633 dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %s\n",
634 vf->vf_id, ice_stat_str(status));
635 return ice_status_to_errno(status);
636 }
637
638 vf->num_mac++;
639
640 if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) {
641 status = ice_fltr_add_mac(vsi, vf->dflt_lan_addr.addr,
642 ICE_FWD_TO_VSI);
643 if (status) {
644 dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %s\n",
645 &vf->dflt_lan_addr.addr[0], vf->vf_id,
646 ice_stat_str(status));
647 return ice_status_to_errno(status);
648 }
649 vf->num_mac++;
650 }
651
652 return 0;
653}
/**
 * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
 * @vf: VF to configure trust setting for
 */
static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
{
	if (vf->trusted)
		set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
}
/**
 * ice_ena_vf_msix_mappings - enable VF MSI-X mappings in hardware
 * @vf: VF to enable MSI-X mappings for
 *
 * Program the hardware registers that map the VF's reserved block of MSI-X
 * vectors (VPINT_ALLOC/VPINT_ALLOC_PCI/GLINT_VECT2FUNC) and enable the VF's
 * mailbox interrupt.
 */
675static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
676{
677 int device_based_first_msix, device_based_last_msix;
678 int pf_based_first_msix, pf_based_last_msix, v;
679 struct ice_pf *pf = vf->pf;
680 int device_based_vf_id;
681 struct ice_hw *hw;
682 u32 reg;
683
684 hw = &pf->hw;
685 pf_based_first_msix = vf->first_vector_idx;
686 pf_based_last_msix = (pf_based_first_msix + pf->num_msix_per_vf) - 1;
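	/* VPINT_ALLOC registers expect device-wide MSI-X indices, so shift the
	 * PF-relative range by the function's first MSI-X vector ID.
	 */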
687
688 device_based_first_msix = pf_based_first_msix +
689 pf->hw.func_caps.common_cap.msix_vector_first_id;
690 device_based_last_msix =
691 (device_based_first_msix + pf->num_msix_per_vf) - 1;
692 device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
693
694 reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
695 VPINT_ALLOC_FIRST_M) |
696 ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
697 VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
698 wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
699
700 reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
701 & VPINT_ALLOC_PCI_FIRST_M) |
702 ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
703 VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
704 wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
705
706
707 for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
708 reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
709 GLINT_VECT2FUNC_VF_NUM_M) |
710 ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
711 GLINT_VECT2FUNC_PF_NUM_M));
712 wr32(hw, GLINT_VECT2FUNC(v), reg);
713 }

	/* Map mailbox interrupt to VF MSI-X vector 0 */
716 wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
717}
/**
 * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
 * @vf: VF to enable the mappings for
 * @max_txq: max Tx queues allowed on the VF's VSI
 * @max_rxq: max Rx queues allowed on the VF's VSI
 */
725static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
726{
727 struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
728 struct device *dev = ice_pf_to_dev(vf->pf);
729 struct ice_hw *hw = &vf->pf->hw;
730 u32 reg;
731
732
733 wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
734
735
736 if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
737
738
739
740
741 reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
742 VPLAN_TX_QBASE_VFFIRSTQ_M) |
743 (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
744 VPLAN_TX_QBASE_VFNUMQ_M));
745 wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
746 } else {
747 dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
748 }
749
750
751 wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);
752
753
754 if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
755
756
757
758
759 reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
760 VPLAN_RX_QBASE_VFFIRSTQ_M) |
761 (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
762 VPLAN_RX_QBASE_VFNUMQ_M));
763 wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
764 } else {
765 dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
766 }
767}
/**
 * ice_ena_vf_mappings - enable VF MSI-X and queue mappings
 * @vf: pointer to the VF structure
 */
static void ice_ena_vf_mappings(struct ice_vf *vf)
{
	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];

	ice_ena_vf_msix_mappings(vf);
	ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
}
/**
 * ice_determine_res - determine an even per-VF resource allocation
 * @pf: pointer to the PF structure
 * @avail_res: available resources in the PF structure
 * @max_res: maximum resources that are allowed to be assigned per VF
 * @min_res: minimum resources that are allowed to be assigned per VF
 *
 * Returns a non-zero per-VF resource count if one between min_res and max_res
 * fits for all num_alloc_vfs, or zero if the PF cannot accommodate them.
 */
791static int
792ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
793{
794 bool checked_min_res = false;
795 int res;

	/* Start by trying to give every VF max_res resources. If that does
	 * not fit in avail_res, keep halving the request (rounding up) until
	 * either some value between min_res and max_res fits, or min_res
	 * itself has been checked and rejected.
	 */
804 res = max_res;
805 while ((res >= min_res) && !checked_min_res) {
806 int num_all_res;
807
808 num_all_res = pf->num_alloc_vfs * res;
809 if (num_all_res <= avail_res)
810 return res;
811
812 if (res == min_res)
813 checked_min_res = true;
814
815 res = DIV_ROUND_UP(res, 2);
816 }
817 return 0;
818}
/**
 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
 * @vf: VF to calculate the register index for
 * @q_vector: a q_vector associated to the VF
 */
825int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
826{
827 struct ice_pf *pf;
828
829 if (!vf || !q_vector)
830 return -EINVAL;
831
832 pf = vf->pf;

	/* always add one to account for the OICR being the first MSI-X */
835 return pf->sriov_base_vector + pf->num_msix_per_vf * vf->vf_id +
836 q_vector->v_idx + 1;
837}
/**
 * ice_get_max_valid_res_idx - Get the max valid resource index
 * @res: pointer to the resource to find the max valid index for
 *
 * Start from the end of the ice_res_tracker and return right when we find the
 * first res->list entry with the ICE_RES_VALID_BIT set. Returns 0 when no
 * entries are in use and -EINVAL when the tracker is NULL.
 */
848static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
849{
850 int i;
851
852 if (!res)
853 return -EINVAL;
854
855 for (i = res->num_entries - 1; i >= 0; i--)
856 if (res->list[i] & ICE_RES_VALID_BIT)
857 return i;
858
859 return 0;
860}
/**
 * ice_sriov_set_msix_res - Set the per-VF MSI-X base vector for SR-IOV
 * @pf: pointer to PF structure
 * @num_msix_needed: number of MSI-X vectors needed for all SR-IOV VFs
 *
 * SR-IOV vectors are taken from the end of the PF's allowed hardware MSI-X
 * vectors so that the irq_tracker entries used by the PF itself are left
 * untouched.
 *
 * Returns 0 on success, and -EINVAL when there are not enough MSI-X vectors
 * available for SR-IOV.
 */
877static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
878{
879 u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
880 int vectors_used = pf->irq_tracker->num_entries;
881 int sriov_base_vector;
882
883 sriov_base_vector = total_vectors - num_msix_needed;

	/* make sure we only grab vectors from past the irq_tracker entries
	 * and that enough MSI-X vectors remain for SR-IOV
	 */
888 if (sriov_base_vector < vectors_used)
889 return -EINVAL;
890
891 pf->sriov_base_vector = sriov_base_vector;
892
893 return 0;
894}
/**
 * ice_set_per_vf_res - check if vectors and queues are available
 * @pf: pointer to the PF structure
 *
 * First, determine how many MSI-X vectors each VF can get: start from the
 * largest supported bucket and fall back to smaller buckets until the vectors
 * remaining after the PF's own usage can cover every allocated VF; fail if
 * even ICE_MIN_INTR_PER_VF cannot be met.
 *
 * Then determine the number of queue pairs per VF by starting from the number
 * of traffic vectors (MSI-X per VF minus the non-queue vectors) and reducing
 * until the available Tx and Rx queues can cover all VFs. Finally, reserve
 * the SR-IOV MSI-X block and record the per-VF queue and vector counts.
 *
 * Returns 0 on success, and a negative error code otherwise.
 */
917static int ice_set_per_vf_res(struct ice_pf *pf)
918{
919 int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
920 int msix_avail_per_vf, msix_avail_for_sriov;
921 struct device *dev = ice_pf_to_dev(pf);
922 u16 num_msix_per_vf, num_txq, num_rxq;
923
924 if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
925 return -EINVAL;
926
927
928 msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
929 pf->irq_tracker->num_entries;
930 msix_avail_per_vf = msix_avail_for_sriov / pf->num_alloc_vfs;
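	/* Pick the largest per-VF MSI-X bucket (MED, SMALL, MULTIQ_MIN, then
	 * the bare minimum) that the vectors left over for SR-IOV can satisfy
	 * for every allocated VF.
	 */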
931 if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
932 num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
933 } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
934 num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
935 } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
936 num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
937 } else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
938 num_msix_per_vf = ICE_MIN_INTR_PER_VF;
939 } else {
940 dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
941 msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
942 pf->num_alloc_vfs);
943 return -EIO;
944 }
945
946
947 num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf),
948 min_t(u16,
949 num_msix_per_vf - ICE_NONQ_VECS_VF,
950 ICE_MAX_RSS_QS_PER_VF),
951 ICE_MIN_QS_PER_VF);
952
953 num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf),
954 min_t(u16,
955 num_msix_per_vf - ICE_NONQ_VECS_VF,
956 ICE_MAX_RSS_QS_PER_VF),
957 ICE_MIN_QS_PER_VF);
958
959 if (!num_txq || !num_rxq) {
960 dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
961 ICE_MIN_QS_PER_VF, pf->num_alloc_vfs);
962 return -EIO;
963 }
964
965 if (ice_sriov_set_msix_res(pf, num_msix_per_vf * pf->num_alloc_vfs)) {
966 dev_err(dev, "Unable to set MSI-X resources for %d VFs\n",
967 pf->num_alloc_vfs);
968 return -EINVAL;
969 }
970
971
972 pf->num_qps_per_vf = min_t(int, num_txq, num_rxq);
973 pf->num_msix_per_vf = num_msix_per_vf;
974 dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
975 pf->num_alloc_vfs, pf->num_msix_per_vf, pf->num_qps_per_vf);
976
977 return 0;
978}
/**
 * ice_clear_vf_reset_trigger - enable VF to access hardware again
 * @vf: VF to enable hardware access for
 */
static void ice_clear_vf_reset_trigger(struct ice_vf *vf)
{
	struct ice_hw *hw = &vf->pf->hw;
	u32 reg;

	reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~VPGEN_VFRTRIG_VFSWR_M;
	wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	ice_flush(hw);
}
/**
 * ice_vf_set_vsi_promisc - set or clear promiscuous mode on the VF's VSI
 * @vf: the VF to configure
 * @vsi: the VF's VSI
 * @promisc_m: mask of promiscuous config bits
 * @rm_promisc: true to clear promiscuous mode, false to set it
 */
1005static enum ice_status
1006ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
1007 bool rm_promisc)
1008{
1009 struct ice_pf *pf = vf->pf;
1010 enum ice_status status = 0;
1011 struct ice_hw *hw;
1012
1013 hw = &pf->hw;
1014 if (vsi->num_vlan) {
1015 status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
1016 rm_promisc);
1017 } else if (vf->port_vlan_info) {
1018 if (rm_promisc)
1019 status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
1020 vf->port_vlan_info);
1021 else
1022 status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
1023 vf->port_vlan_info);
1024 } else {
1025 if (rm_promisc)
1026 status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
1027 0);
1028 else
1029 status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
1030 0);
1031 }
1032
1033 return status;
1034}
/**
 * ice_vf_clear_counters - Clear all VF counters
 * @vf: the VF to clear counters for
 */
1036static void ice_vf_clear_counters(struct ice_vf *vf)
1037{
1038 struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
1039
1040 vf->num_mac = 0;
1041 vsi->num_vlan = 0;
1042 memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
1043 memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
1044}
/**
 * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
 * @vf: VF to perform pre VSI rebuild tasks on
 *
 * These tasks are items that don't need to be amortized since they are most
 * likely called in a for loop with all VF(s) in the reset path.
 */
static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
{
	ice_vf_clear_counters(vf);
	ice_clear_vf_reset_trigger(vf);
}
/**
 * ice_vf_rebuild_host_cfg - rebuild the host-maintained VF configuration
 * @vf: VF to rebuild host configuration on
 */
1063static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
1064{
1065 struct device *dev = ice_pf_to_dev(vf->pf);
1066
1067 ice_vf_set_host_trust_cfg(vf);
1068
1069 if (ice_vf_rebuild_host_mac_cfg(vf))
1070 dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
1071 vf->vf_id);
1072
1073 if (ice_vf_rebuild_host_vlan_cfg(vf))
1074 dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
1075 vf->vf_id);
1076}
/**
 * ice_vf_rebuild_vsi_with_release - release and setup the VF's VSI
 * @vf: VF to release and setup the VSI for
 *
 * This is only called when a single VF is being reset (i.e. VFR, VFLR, host
 * VF configuration change, etc.).
 */
1085static int ice_vf_rebuild_vsi_with_release(struct ice_vf *vf)
1086{
1087 ice_vf_vsi_release(vf);
1088 if (!ice_vf_vsi_setup(vf))
1089 return -ENOMEM;
1090
1091 return 0;
1092}
/**
 * ice_vf_rebuild_vsi - rebuild the VF's VSI
 * @vf: VF to rebuild the VSI for
 *
 * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
 * host, PFR, CORER, etc.).
 */
1101static int ice_vf_rebuild_vsi(struct ice_vf *vf)
1102{
1103 struct ice_pf *pf = vf->pf;
1104 struct ice_vsi *vsi;
1105
1106 vsi = pf->vsi[vf->lan_vsi_idx];
1107
1108 if (ice_vsi_rebuild(vsi, true)) {
1109 dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
1110 vf->vf_id);
1111 return -EIO;
1112 }
	/* vsi->idx will remain the same in this case so don't update
	 * vf->lan_vsi_idx
	 */
1116 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
1117 vf->lan_vsi_num = vsi->vsi_num;
1118
1119 return 0;
1120}
/**
 * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
 * @vf: VF to set in initialized state
 *
 * After this function the VF will be ready to receive/handle the
 * VIRTCHNL_OP_GET_VF_RESOURCES message
 */
1129static void ice_vf_set_initialized(struct ice_vf *vf)
1130{
1131 ice_set_vf_state_qs_dis(vf);
1132 clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
1133 clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
1134 clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
1135 set_bit(ICE_VF_STATE_INIT, vf->vf_states);
1136}
/**
 * ice_vf_post_vsi_rebuild - tasks to do after the VF's VSI has been rebuilt
 * @vf: VF to perform tasks on
 */
1142static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
1143{
1144 struct ice_pf *pf = vf->pf;
1145 struct ice_hw *hw;
1146
1147 hw = &pf->hw;
1148
1149 ice_vf_rebuild_host_cfg(vf);
1150
1151 ice_vf_set_initialized(vf);
1152 ice_ena_vf_mappings(vf);
1153 wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
1154}
/**
 * ice_reset_all_vfs - reset all allocated VFs in one go
 * @pf: pointer to the PF structure
 * @is_vflr: true if VFLR was issued, false if not
 *
 * First, tell the hardware to reset each VF, then do all the waiting in one
 * chunk, and finally finish restoring each VF after the wait. This is useful
 * during PF routines which need to reset all VFs, as otherwise it must
 * perform these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 */
1168bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
1169{
1170 struct device *dev = ice_pf_to_dev(pf);
1171 struct ice_hw *hw = &pf->hw;
1172 struct ice_vf *vf;
1173 int v, i;
1174
1175
1176 if (!pf->num_alloc_vfs)
1177 return false;
1178
1179
1180 if (test_and_set_bit(__ICE_VF_DIS, pf->state))
1181 return false;
1182
1183
1184 ice_for_each_vf(pf, v)
1185 ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
1193 for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
1194
1195 while (v < pf->num_alloc_vfs) {
1196 u32 reg;
1197
1198 vf = &pf->vf[v];
1199 reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
1200 if (!(reg & VPGEN_VFRSTAT_VFRD_M)) {
1201
1202 usleep_range(10, 20);
1203 break;
1204 }
1205
1206
1207
1208
1209 v++;
1210 }
1211 }
1212
1213
1214
1215
1216 if (v < pf->num_alloc_vfs)
1217 dev_warn(dev, "VF reset check timeout\n");
1218
1219
1220 ice_for_each_vf(pf, v) {
1221 vf = &pf->vf[v];
1222
1223 ice_vf_pre_vsi_rebuild(vf);
1224 ice_vf_rebuild_vsi(vf);
1225 ice_vf_post_vsi_rebuild(vf);
1226 }
1227
1228 ice_flush(hw);
1229 clear_bit(__ICE_VF_DIS, pf->state);
1230
1231 return true;
1232}
/**
 * ice_is_vf_disabled - check if the PF or the VF is disabled
 * @vf: pointer to the VF info
 *
 * Returns true if the PF or VF is disabled, false otherwise.
 */
1240static bool ice_is_vf_disabled(struct ice_vf *vf)
1241{
1242 struct ice_pf *pf = vf->pf;
1243
1244
1245
1246
1247
1248
1249 return (test_bit(__ICE_VF_DIS, pf->state) ||
1250 test_bit(ICE_VF_STATE_DIS, vf->vf_states));
1251}
/**
 * ice_reset_vf - Reset a particular VF
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 *
 * Returns true if the VF is currently in reset, resets successfully, or
 * resets are disabled, and false otherwise.
 */
1261bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
1262{
1263 struct ice_pf *pf = vf->pf;
1264 struct ice_vsi *vsi;
1265 struct device *dev;
1266 struct ice_hw *hw;
1267 bool rsd = false;
1268 u8 promisc_m;
1269 u32 reg;
1270 int i;
1271
1272 dev = ice_pf_to_dev(pf);
1273
1274 if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) {
1275 dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
1276 vf->vf_id);
1277 return true;
1278 }
1279
1280 if (ice_is_vf_disabled(vf)) {
		dev_dbg(dev, "VF %d is already disabled; no reset needed\n",
			vf->vf_id);
1283 return true;
1284 }
1285
1286
1287 set_bit(ICE_VF_STATE_DIS, vf->vf_states);
1288 ice_trigger_vf_reset(vf, is_vflr, false);
1289
1290 vsi = pf->vsi[vf->lan_vsi_idx];
1291
1292 if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
1293 ice_dis_vf_qs(vf);

	/* Call Disable LAN Tx queue AQ whether or not queues are
	 * enabled. This is needed for successful completion of VFR.
	 */
1298 ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
1299 NULL, ICE_VF_RESET, vf->vf_id, NULL);
1300
1301 hw = &pf->hw;
1302
1303
1304
1305 for (i = 0; i < 10; i++) {
1306
1307
1308
1309
1310 reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
1311 if (reg & VPGEN_VFRSTAT_VFRD_M) {
1312 rsd = true;
1313 break;
1314 }
1315
1316
1317 usleep_range(10, 20);
1318 }
1319
1320
1321
1322
1323 if (!rsd)
1324 dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);

	/* disable promiscuous modes in case they were enabled so the VF does
	 * not come back up with stale promiscuous filters applied
	 */
1329 if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
1330 test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
1331 if (vf->port_vlan_info || vsi->num_vlan)
1332 promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
1333 else
1334 promisc_m = ICE_UCAST_PROMISC_BITS;
1335
1336 vsi = pf->vsi[vf->lan_vsi_idx];
1337 if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
1338 dev_err(dev, "disabling promiscuous mode failed\n");
1339 }
1340
1341 ice_vf_pre_vsi_rebuild(vf);
1342 ice_vf_rebuild_vsi_with_release(vf);
1343 ice_vf_post_vsi_rebuild(vf);
1344
1345 return true;
1346}
/**
 * ice_vc_notify_link_state - Inform all VFs on a PF of link status
 * @pf: pointer to the PF structure
 */
1352void ice_vc_notify_link_state(struct ice_pf *pf)
1353{
1354 int i;
1355
1356 ice_for_each_vf(pf, i)
1357 ice_vc_notify_vf_link_state(&pf->vf[i]);
1358}
/**
 * ice_vc_notify_reset - Send pending reset message to all VFs
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 */
1366void ice_vc_notify_reset(struct ice_pf *pf)
1367{
1368 struct virtchnl_pf_event pfe;
1369
1370 if (!pf->num_alloc_vfs)
1371 return;
1372
1373 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
1374 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
1375 ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
1376 (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
1377}
/**
 * ice_vc_notify_vf_reset - Notify VF of a reset event
 * @vf: pointer to the VF structure
 */
1383static void ice_vc_notify_vf_reset(struct ice_vf *vf)
1384{
1385 struct virtchnl_pf_event pfe;
1386 struct ice_pf *pf;
1387
1388 if (!vf)
1389 return;
1390
1391 pf = vf->pf;
1392 if (ice_validate_vf_id(pf, vf->vf_id))
1393 return;
1394
1395
1396
1397
1398 if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
1399 !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
1400 test_bit(ICE_VF_STATE_DIS, vf->vf_states))
1401 return;
1402
1403 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
1404 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
1405 ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
1406 VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
1407 NULL);
1408}
/**
 * ice_init_vf_vsi_res - initialize/setup VF VSI resources
 * @vf: VF to initialize/setup the VSI for
 *
 * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up
 * the VF VSI's broadcast filter. It is only used during initial VF creation.
 */
1417static int ice_init_vf_vsi_res(struct ice_vf *vf)
1418{
1419 struct ice_pf *pf = vf->pf;
1420 u8 broadcast[ETH_ALEN];
1421 enum ice_status status;
1422 struct ice_vsi *vsi;
1423 struct device *dev;
1424 int err;
1425
1426 vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
1427
1428 dev = ice_pf_to_dev(pf);
1429 vsi = ice_vf_vsi_setup(vf);
1430 if (!vsi)
1431 return -ENOMEM;
1432
1433 err = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
1434 if (err) {
1435 dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
1436 vf->vf_id);
1437 goto release_vsi;
1438 }
1439
1440 eth_broadcast_addr(broadcast);
1441 status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
1442 if (status) {
1443 dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %s\n",
1444 vf->vf_id, ice_stat_str(status));
1445 err = ice_status_to_errno(status);
1446 goto release_vsi;
1447 }
1448
1449 vf->num_mac = 1;
1450
1451 return 0;
1452
1453release_vsi:
1454 ice_vf_vsi_release(vf);
1455 return err;
1456}
/**
 * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
 * @pf: PF the VFs are associated with
 */
1462static int ice_start_vfs(struct ice_pf *pf)
1463{
1464 struct ice_hw *hw = &pf->hw;
1465 int retval, i;
1466
1467 ice_for_each_vf(pf, i) {
1468 struct ice_vf *vf = &pf->vf[i];
1469
1470 ice_clear_vf_reset_trigger(vf);
1471
1472 retval = ice_init_vf_vsi_res(vf);
1473 if (retval) {
1474 dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
1475 vf->vf_id, retval);
1476 goto teardown;
1477 }
1478
1479 set_bit(ICE_VF_STATE_INIT, vf->vf_states);
1480 ice_ena_vf_mappings(vf);
1481 wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
1482 }
1483
1484 ice_flush(hw);
1485 return 0;
1486
1487teardown:
1488 for (i = i - 1; i >= 0; i--) {
1489 struct ice_vf *vf = &pf->vf[i];
1490
1491 ice_dis_vf_mappings(vf);
1492 ice_vf_vsi_release(vf);
1493 }
1494
1495 return retval;
1496}
/**
 * ice_set_dflt_settings_vfs - set VF defaults during initialization/creation
 * @pf: PF holding references to all VFs for default configuration
 */
1502static void ice_set_dflt_settings_vfs(struct ice_pf *pf)
1503{
1504 int i;
1505
1506 ice_for_each_vf(pf, i) {
1507 struct ice_vf *vf = &pf->vf[i];
1508
1509 vf->pf = pf;
1510 vf->vf_id = i;
1511 vf->vf_sw_id = pf->first_sw;
1512
1513 set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vf->vf_caps);
1514 vf->spoofchk = true;
1515 vf->num_vf_qs = pf->num_qps_per_vf;
1516 }
1517}
/**
 * ice_alloc_vfs - allocate num_vfs in the PF structure
 * @pf: PF to store the allocated VFs in
 * @num_vfs: number of VFs to allocate
 */
1524static int ice_alloc_vfs(struct ice_pf *pf, int num_vfs)
1525{
1526 struct ice_vf *vfs;
1527
1528 vfs = devm_kcalloc(ice_pf_to_dev(pf), num_vfs, sizeof(*vfs),
1529 GFP_KERNEL);
1530 if (!vfs)
1531 return -ENOMEM;
1532
1533 pf->vf = vfs;
1534 pf->num_alloc_vfs = num_vfs;
1535
1536 return 0;
1537}
/**
 * ice_ena_vfs - enable VFs so they are ready to be used
 * @pf: pointer to the PF structure
 * @num_vfs: number of VFs to enable
 */
1544static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
1545{
1546 struct device *dev = ice_pf_to_dev(pf);
1547 struct ice_hw *hw = &pf->hw;
1548 int ret;

	/* Disable global interrupt 0 so we don't try to handle the VFLR. */
1551 wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
1552 ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
1553 set_bit(__ICE_OICR_INTR_DIS, pf->state);
1554 ice_flush(hw);
1555
1556 ret = pci_enable_sriov(pf->pdev, num_vfs);
1557 if (ret) {
1558 pf->num_alloc_vfs = 0;
1559 goto err_unroll_intr;
1560 }
1561
1562 ret = ice_alloc_vfs(pf, num_vfs);
1563 if (ret)
1564 goto err_pci_disable_sriov;
1565
1566 if (ice_set_per_vf_res(pf)) {
		dev_err(dev, "Not enough resources for %d VFs, try with a smaller number of VFs\n",
			num_vfs);
1569 ret = -ENOSPC;
1570 goto err_unroll_sriov;
1571 }
1572
1573 ice_set_dflt_settings_vfs(pf);
1574
1575 if (ice_start_vfs(pf)) {
1576 dev_err(dev, "Failed to start VF(s)\n");
1577 ret = -EAGAIN;
1578 goto err_unroll_sriov;
1579 }
1580
1581 clear_bit(__ICE_VF_DIS, pf->state);
1582 return 0;
1583
1584err_unroll_sriov:
1585 devm_kfree(dev, pf->vf);
1586 pf->vf = NULL;
1587 pf->num_alloc_vfs = 0;
1588err_pci_disable_sriov:
1589 pci_disable_sriov(pf->pdev);
1590err_unroll_intr:
1591
1592 ice_irq_dynamic_ena(hw, NULL, NULL);
1593 clear_bit(__ICE_OICR_INTR_DIS, pf->state);
1594 return ret;
1595}
/**
 * ice_pci_sriov_ena - Enable or change number of VFs
 * @pf: pointer to the PF structure
 * @num_vfs: number of VFs to allocate
 *
 * Returns 0 on success and negative on failure
 */
1604static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
1605{
1606 int pre_existing_vfs = pci_num_vf(pf->pdev);
1607 struct device *dev = ice_pf_to_dev(pf);
1608 int err;
1609
1610 if (pre_existing_vfs && pre_existing_vfs != num_vfs)
1611 ice_free_vfs(pf);
1612 else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
1613 return 0;
1614
1615 if (num_vfs > pf->num_vfs_supported) {
1616 dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
1617 num_vfs, pf->num_vfs_supported);
1618 return -EOPNOTSUPP;
1619 }
1620
1621 dev_info(dev, "Enabling %d VFs\n", num_vfs);
1622 err = ice_ena_vfs(pf, num_vfs);
1623 if (err) {
1624 dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
1625 return err;
1626 }
1627
1628 set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
1629 return 0;
1630}
/**
 * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
 * @pf: PF to enable SR-IOV on
 */
1636static int ice_check_sriov_allowed(struct ice_pf *pf)
1637{
1638 struct device *dev = ice_pf_to_dev(pf);
1639
1640 if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
1641 dev_err(dev, "This device is not capable of SR-IOV\n");
1642 return -EOPNOTSUPP;
1643 }
1644
1645 if (ice_is_safe_mode(pf)) {
1646 dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
1647 return -EOPNOTSUPP;
1648 }
1649
1650 if (!ice_pf_state_is_nominal(pf)) {
1651 dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
1652 return -EBUSY;
1653 }
1654
1655 return 0;
1656}
/**
 * ice_sriov_configure - Enable or change number of VFs via sysfs
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate or 0 to free VFs
 *
 * This function is called when the user updates the number of VFs in sysfs.
 * On success return whatever num_vfs was set to by the caller. Return an
 * error if not enough resources are available.
 */
1667int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
1668{
1669 struct ice_pf *pf = pci_get_drvdata(pdev);
1670 struct device *dev = ice_pf_to_dev(pf);
1671 int err;
1672
1673 err = ice_check_sriov_allowed(pf);
1674 if (err)
1675 return err;
1676
1677 if (!num_vfs) {
1678 if (!pci_vfs_assigned(pdev)) {
1679 ice_free_vfs(pf);
1680 return 0;
1681 }
1682
1683 dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
1684 return -EBUSY;
1685 }
1686
1687 err = ice_pci_sriov_ena(pf, num_vfs);
1688 if (err)
1689 return err;
1690
1691 return num_vfs;
1692}
/**
 * ice_process_vflr_event - handle a VFLR (VF level reset) interrupt event
 * @pf: pointer to the PF structure
 *
 * Called from the VFLR IRQ handler to scan GLGEN_VFLRSTAT and reset any VF
 * whose VFLR bit is set.
 */
1701void ice_process_vflr_event(struct ice_pf *pf)
1702{
1703 struct ice_hw *hw = &pf->hw;
1704 unsigned int vf_id;
1705 u32 reg;
1706
1707 if (!test_and_clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
1708 !pf->num_alloc_vfs)
1709 return;
1710
1711 ice_for_each_vf(pf, vf_id) {
1712 struct ice_vf *vf = &pf->vf[vf_id];
1713 u32 reg_idx, bit_idx;
1714
1715 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
1716 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
1717
1718 reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
1719 if (reg & BIT(bit_idx))
1720
1721 ice_reset_vf(vf, true);
1722 }
1723}
/**
 * ice_vc_reset_vf - perform a software reset on the VF after informing it
 * @vf: pointer to the VF info
 */
1729static void ice_vc_reset_vf(struct ice_vf *vf)
1730{
1731 ice_vc_notify_vf_reset(vf);
1732 ice_reset_vf(vf, false);
1733}
/**
 * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
 * @pf: PF used to index all VFs
 * @pfq: queue index relative to the PF's function space
 *
 * If no VF is found who owns the pfq then return NULL, otherwise return a
 * pointer to the VF who owns the pfq.
 */
1743static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
1744{
1745 unsigned int vf_id;
1746
1747 ice_for_each_vf(pf, vf_id) {
1748 struct ice_vf *vf = &pf->vf[vf_id];
1749 struct ice_vsi *vsi;
1750 u16 rxq_idx;
1751
1752 vsi = pf->vsi[vf->lan_vsi_idx];
1753
1754 ice_for_each_rxq(vsi, rxq_idx)
1755 if (vsi->rxq_map[rxq_idx] == pfq)
1756 return vf;
1757 }
1758
1759 return NULL;
1760}
/**
 * ice_globalq_to_pfq - convert from global queue index to PF space queue index
 * @pf: PF used for conversion
 * @globalq: global queue index used to convert to PF space queue index
 */
static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
{
	return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
}
/**
 * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
 * @pf: PF that the LAN overflow event happened on
 * @event: structure holding the event information for the LAN overflow event
 *
 * Determine if the LAN overflow event was caused by a VF queue. If it was not
 * caused by a VF, do nothing. If a VF caused this LAN overflow event trigger
 * a reset on the offending VF.
 */
1781void
1782ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
1783{
1784 u32 gldcb_rtctq, queue;
1785 struct ice_vf *vf;
1786
1787 gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
1788 dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
1789
1790
1791 queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
1792 GLDCB_RTCTQ_RXQNUM_S;
1793
1794 vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
1795 if (!vf)
1796 return;
1797
1798 ice_vc_reset_vf(vf);
1799}
/**
 * ice_vc_send_msg_to_vf - Send message to VF
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to VF
 */
1811static int
1812ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
1813 enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
1814{
1815 enum ice_status aq_ret;
1816 struct device *dev;
1817 struct ice_pf *pf;
1818
1819 if (!vf)
1820 return -EINVAL;
1821
1822 pf = vf->pf;
1823 if (ice_validate_vf_id(pf, vf->vf_id))
1824 return -EINVAL;
1825
1826 dev = ice_pf_to_dev(pf);
1827
1828
1829 if (v_retval) {
1830 vf->num_inval_msgs++;
1831 dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id,
1832 v_opcode, v_retval);
1833 if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
1834 dev_err(dev, "Number of invalid messages exceeded for VF %d\n",
1835 vf->vf_id);
1836 dev_err(dev, "Use PF Control I/F to enable the VF\n");
1837 set_bit(ICE_VF_STATE_DIS, vf->vf_states);
1838 return -EIO;
1839 }
1840 } else {
1841 vf->num_valid_msgs++;
1842
1843 vf->num_inval_msgs = 0;
1844 }
1845
1846 aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
1847 msg, msglen, NULL);
1848 if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
1849 dev_info(dev, "Unable to send the message to VF %d ret %s aq_err %s\n",
1850 vf->vf_id, ice_stat_str(aq_ret),
1851 ice_aq_str(pf->hw.mailboxq.sq_last_status));
1852 return -EIO;
1853 }
1854
1855 return 0;
1856}
/**
 * ice_vc_get_ver_msg - respond to the VF's API version request
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request the API version used by the PF
 */
1865static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
1866{
1867 struct virtchnl_version_info info = {
1868 VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
1869 };
1870
1871 vf->vf_ver = *(struct virtchnl_version_info *)msg;
1872
1873 if (VF_IS_V10(&vf->vf_ver))
1874 info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
1875
1876 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
1877 VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
1878 sizeof(struct virtchnl_version_info));
1879}
/**
 * ice_vc_get_vf_res_msg - respond to the VF's resource request
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request its resources
 */
1888static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
1889{
1890 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1891 struct virtchnl_vf_resource *vfres = NULL;
1892 struct ice_pf *pf = vf->pf;
1893 struct ice_vsi *vsi;
1894 int len = 0;
1895 int ret;
1896
1897 if (ice_check_vf_init(pf, vf)) {
1898 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1899 goto err;
1900 }
1901
1902 len = sizeof(struct virtchnl_vf_resource);
1903
1904 vfres = kzalloc(len, GFP_KERNEL);
1905 if (!vfres) {
1906 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1907 len = 0;
1908 goto err;
1909 }
1910 if (VF_IS_V11(&vf->vf_ver))
1911 vf->driver_caps = *(u32 *)msg;
1912 else
1913 vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
1914 VIRTCHNL_VF_OFFLOAD_RSS_REG |
1915 VIRTCHNL_VF_OFFLOAD_VLAN;
1916
1917 vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
1918 vsi = pf->vsi[vf->lan_vsi_idx];
1919 if (!vsi) {
1920 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1921 goto err;
1922 }
1923
1924 if (!vsi->info.pvid)
1925 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
1926
1927 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
1928 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
1929 } else {
1930 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
1931 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
1932 else
1933 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
1934 }
1935
1936 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1937 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
1938
1939 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
1940 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
1941
1942 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
1943 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
1944
1945 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
1946 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
1947
1948 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1949 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
1950
1951 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
1952 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
1953
1954 if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
1955 vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
1956
1957 vfres->num_vsis = 1;
1958
1959 vfres->num_queue_pairs = vsi->num_txq;
1960 vfres->max_vectors = pf->num_msix_per_vf;
1961 vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
1962 vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
1963
1964 vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
1965 vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
1966 vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
1967 ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
1968 vf->dflt_lan_addr.addr);
1969
1970
1971 vf->driver_caps = vfres->vf_cap_flags;
1972
1973 set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
1974
1975err:
1976
1977 ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
1978 (u8 *)vfres, len);
1979
1980 kfree(vfres);
1981 return ret;
1982}
/**
 * ice_vc_reset_vf_msg - handle the VF's reset request
 * @vf: pointer to the VF info
 *
 * called from the VF to reset itself
 *
 * Unlike other virtchnl messages, the PF driver does not send a response back
 * to the VF.
 */
1992static void ice_vc_reset_vf_msg(struct ice_vf *vf)
1993{
1994 if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
1995 ice_reset_vf(vf, false);
1996}
/**
 * ice_find_vsi_from_id - find the VSI with the given ID
 * @pf: the PF structure to search for the VSI
 * @id: ID of the VSI it is searching for
 *
 * searches for the VSI with the given ID
 */
2005static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
2006{
2007 int i;
2008
2009 ice_for_each_vsi(pf, i)
2010 if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
2011 return pf->vsi[i];
2012
2013 return NULL;
2014}
/**
 * ice_vc_isvalid_vsi_id - check whether the VSI ID is valid for this VF
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI ID
 *
 * check for the valid VSI ID
 */
2023static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
2024{
2025 struct ice_pf *pf = vf->pf;
2026 struct ice_vsi *vsi;
2027
2028 vsi = ice_find_vsi_from_id(pf, vsi_id);
2029
2030 return (vsi && (vsi->vf_id == vf->vf_id));
2031}
/**
 * ice_vc_isvalid_q_id - check whether the queue ID is valid for this VSI
 * @vf: pointer to the VF info
 * @vsi_id: VSI ID
 * @qid: VSI relative queue ID
 *
 * check for the valid queue ID
 */
2041static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
2042{
2043 struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
2044
2045 return (vsi && (qid < vsi->alloc_txq));
2046}
/**
 * ice_vc_isvalid_ring_len - check that a requested ring length is valid
 * @ring_len: length of ring
 *
 * check for a valid ring count, which must be a multiple of
 * ICE_REQ_DESC_MULTIPLE within the supported range, or zero
 */
2055static bool ice_vc_isvalid_ring_len(u16 ring_len)
2056{
2057 return ring_len == 0 ||
2058 (ring_len >= ICE_MIN_NUM_DESC &&
2059 ring_len <= ICE_MAX_NUM_DESC &&
2060 !(ring_len % ICE_REQ_DESC_MULTIPLE));
2061}
/**
 * ice_vc_config_rss_key - configure the VF's RSS key
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Configure the VF's RSS key
 */
2070static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
2071{
2072 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2073 struct virtchnl_rss_key *vrk =
2074 (struct virtchnl_rss_key *)msg;
2075 struct ice_pf *pf = vf->pf;
2076 struct ice_vsi *vsi;
2077
2078 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2079 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2080 goto error_param;
2081 }
2082
2083 if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
2084 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2085 goto error_param;
2086 }
2087
2088 if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
2089 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2090 goto error_param;
2091 }
2092
2093 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
2094 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2095 goto error_param;
2096 }
2097
2098 vsi = pf->vsi[vf->lan_vsi_idx];
2099 if (!vsi) {
2100 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2101 goto error_param;
2102 }
2103
2104 if (ice_set_rss(vsi, vrk->key, NULL, 0))
2105 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2106error_param:
2107 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
2108 NULL, 0);
2109}
/**
 * ice_vc_config_rss_lut - configure the VF's RSS lookup table
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Configure the VF's RSS LUT
 */
2118static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
2119{
2120 struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
2121 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2122 struct ice_pf *pf = vf->pf;
2123 struct ice_vsi *vsi;
2124
2125 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2126 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2127 goto error_param;
2128 }
2129
2130 if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
2131 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2132 goto error_param;
2133 }
2134
2135 if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
2136 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2137 goto error_param;
2138 }
2139
2140 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
2141 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2142 goto error_param;
2143 }
2144
2145 vsi = pf->vsi[vf->lan_vsi_idx];
2146 if (!vsi) {
2147 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2148 goto error_param;
2149 }
2150
2151 if (ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
2152 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2153error_param:
2154 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
2155 NULL, 0);
2156}
/**
 * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
 * @vf: the VF being reset
 *
 * The max poll time is about ~800ms, which is about the maximum time it takes
 * for a VF to be reset and/or a VF driver to be removed.
 */
2165static void ice_wait_on_vf_reset(struct ice_vf *vf)
2166{
2167 int i;
2168
2169 for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
2170 if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
2171 break;
2172 msleep(ICE_MAX_VF_RESET_SLEEP_MS);
2173 }
2174}
/**
 * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
 * @vf: VF to check if it's ready to be configured/queried
 *
 * The purpose of this function is to make sure the VF is not in reset, not
 * disabled, and initialized so it can be configured and/or queried by a host
 * administrator.
 */
2184static int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
2185{
2186 struct ice_pf *pf;
2187
2188 ice_wait_on_vf_reset(vf);
2189
2190 if (ice_is_vf_disabled(vf))
2191 return -EINVAL;
2192
2193 pf = vf->pf;
2194 if (ice_check_vf_init(pf, vf))
2195 return -EBUSY;
2196
2197 return 0;
2198}
/**
 * ice_set_vf_spoofchk - enable or disable VF spoof checking
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ena: flag to enable or disable the feature
 *
 * Enable or disable VF spoof checking
 */
2208int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
2209{
2210 struct ice_netdev_priv *np = netdev_priv(netdev);
2211 struct ice_pf *pf = np->vsi->back;
2212 struct ice_vsi_ctx *ctx;
2213 struct ice_vsi *vf_vsi;
2214 enum ice_status status;
2215 struct device *dev;
2216 struct ice_vf *vf;
2217 int ret;
2218
2219 dev = ice_pf_to_dev(pf);
2220 if (ice_validate_vf_id(pf, vf_id))
2221 return -EINVAL;
2222
2223 vf = &pf->vf[vf_id];
2224 ret = ice_check_vf_ready_for_cfg(vf);
2225 if (ret)
2226 return ret;
2227
2228 vf_vsi = pf->vsi[vf->lan_vsi_idx];
2229 if (!vf_vsi) {
2230 netdev_err(netdev, "VSI %d for VF %d is null\n",
2231 vf->lan_vsi_idx, vf->vf_id);
2232 return -EINVAL;
2233 }
2234
2235 if (vf_vsi->type != ICE_VSI_VF) {
		netdev_err(netdev, "Type %d of VSI %d for VF %d is not ICE_VSI_VF\n",
			   vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
2238 return -ENODEV;
2239 }
2240
2241 if (ena == vf->spoofchk) {
2242 dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
2243 return 0;
2244 }
2245
2246 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2247 if (!ctx)
2248 return -ENOMEM;
2249
2250 ctx->info.sec_flags = vf_vsi->info.sec_flags;
2251 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
2252 if (ena) {
2253 ctx->info.sec_flags |=
2254 ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
2255 (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2256 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
2257 } else {
2258 ctx->info.sec_flags &=
2259 ~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
2260 (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2261 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
2262 }
2263
2264 status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
2265 if (status) {
		dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d, error %s\n",
			ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num,
			ice_stat_str(status));
2269 ret = -EIO;
2270 goto out;
2271 }
2272
2273
2274 vf_vsi->info.sec_flags = ctx->info.sec_flags;
2275 vf->spoofchk = ena;
2276
2277out:
2278 kfree(ctx);
2279 return ret;
2280}
/**
 * ice_is_any_vf_in_promisc - check if any VF(s) are in promiscuous mode
 * @pf: PF structure for accessing VF(s)
 *
 * Return false if no VF(s) are in unicast and/or multicast promiscuous mode,
 * else return true
 */
2289bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
2290{
2291 int vf_idx;
2292
2293 ice_for_each_vf(pf, vf_idx) {
2294 struct ice_vf *vf = &pf->vf[vf_idx];
2295
2296
2297 if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
2298 test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
2299 return true;
2300 }
2301
2302 return false;
2303}
/**
 * ice_vc_cfg_promiscuous_mode_msg - handle the VF's promiscuous mode request
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure promiscuous mode on the VF's VSI
 */
2312static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
2313{
2314 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2315 struct virtchnl_promisc_info *info =
2316 (struct virtchnl_promisc_info *)msg;
2317 struct ice_pf *pf = vf->pf;
2318 struct ice_vsi *vsi;
2319 struct device *dev;
2320 bool rm_promisc;
2321 int ret = 0;
2322
2323 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2324 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2325 goto error_param;
2326 }
2327
2328 if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) {
2329 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2330 goto error_param;
2331 }
2332
2333 vsi = pf->vsi[vf->lan_vsi_idx];
2334 if (!vsi) {
2335 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2336 goto error_param;
2337 }
2338
2339 dev = ice_pf_to_dev(pf);
2340 if (!test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2341 dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
2342 vf->vf_id);
		/* Leave v_ret alone, lie to the VF on purpose. */
2344 goto error_param;
2345 }
2346
2347 rm_promisc = !(info->flags & FLAG_VF_UNICAST_PROMISC) &&
2348 !(info->flags & FLAG_VF_MULTICAST_PROMISC);
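	/* rm_promisc is true only when the VF asked to clear both unicast and
	 * multicast promiscuous mode, i.e. promiscuous Rx should be removed.
	 */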
2349
2350 if (vsi->num_vlan || vf->port_vlan_info) {
2351 struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
2352 struct net_device *pf_netdev;
2353
2354 if (!pf_vsi) {
2355 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2356 goto error_param;
2357 }
2358
2359 pf_netdev = pf_vsi->netdev;
2360
2361 ret = ice_set_vf_spoofchk(pf_netdev, vf->vf_id, rm_promisc);
2362 if (ret) {
2363 dev_err(dev, "Failed to update spoofchk to %s for VF %d VSI %d when setting promiscuous mode\n",
2364 rm_promisc ? "ON" : "OFF", vf->vf_id,
2365 vsi->vsi_num);
2366 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2367 }
2368
2369 ret = ice_cfg_vlan_pruning(vsi, true, !rm_promisc);
2370 if (ret) {
2371 dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
2372 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2373 goto error_param;
2374 }
2375 }
2376
2377 if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
2378 bool set_dflt_vsi = !!(info->flags & FLAG_VF_UNICAST_PROMISC);
2379
2380 if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw))
2381
2382
2383
2384 ret = ice_set_dflt_vsi(pf->first_sw, vsi);
2385 else if (!set_dflt_vsi &&
2386 ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
2387
2388
2389
2390 ret = ice_clear_dflt_vsi(pf->first_sw);
2391
2392 if (ret) {
2393 dev_err(dev, "%sable VF %d as the default VSI failed, error %d\n",
2394 set_dflt_vsi ? "en" : "dis", vf->vf_id, ret);
2395 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2396 goto error_param;
2397 }
2398 } else {
2399 enum ice_status status;
2400 u8 promisc_m;
2401
2402 if (info->flags & FLAG_VF_UNICAST_PROMISC) {
2403 if (vf->port_vlan_info || vsi->num_vlan)
2404 promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
2405 else
2406 promisc_m = ICE_UCAST_PROMISC_BITS;
2407 } else if (info->flags & FLAG_VF_MULTICAST_PROMISC) {
2408 if (vf->port_vlan_info || vsi->num_vlan)
2409 promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
2410 else
2411 promisc_m = ICE_MCAST_PROMISC_BITS;
2412 } else {
2413 if (vf->port_vlan_info || vsi->num_vlan)
2414 promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
2415 else
2416 promisc_m = ICE_UCAST_PROMISC_BITS;
2417 }
2418
2419
2420
2421
2422 status = ice_vf_set_vsi_promisc(vf, vsi, promisc_m, rm_promisc);
2423 if (status) {
2424 dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed, error: %s\n",
2425 rm_promisc ? "dis" : "en", vf->vf_id,
2426 ice_stat_str(status));
2427 v_ret = ice_err_to_virt_err(status);
2428 goto error_param;
2429 } else {
2430 dev_dbg(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d succeeded\n",
2431 rm_promisc ? "dis" : "en", vf->vf_id);
2432 }
2433 }
2434
2435 if (info->flags & FLAG_VF_MULTICAST_PROMISC)
2436 set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
2437 else
2438 clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
2439
2440 if (info->flags & FLAG_VF_UNICAST_PROMISC)
2441 set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
2442 else
2443 clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
2444
2445error_param:
2446 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
2447 v_ret, NULL, 0);
2448}
/**
 * ice_vc_get_stats_msg - report VSI statistics to the VF
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to get VSI stats
 */
2457static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
2458{
2459 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2460 struct virtchnl_queue_select *vqs =
2461 (struct virtchnl_queue_select *)msg;
2462 struct ice_eth_stats stats = { 0 };
2463 struct ice_pf *pf = vf->pf;
2464 struct ice_vsi *vsi;
2465
2466 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2467 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2468 goto error_param;
2469 }
2470
2471 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2472 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2473 goto error_param;
2474 }
2475
2476 vsi = pf->vsi[vf->lan_vsi_idx];
2477 if (!vsi) {
2478 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2479 goto error_param;
2480 }
2481
2482 ice_update_eth_stats(vsi);
2483
2484 stats = vsi->eth_stats;
2485
2486error_param:
2487
2488 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
2489 (u8 *)&stats, sizeof(stats));
2490}
/**
 * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from virtchnl
 * @vqs: virtchnl_queue_select structure containing the bitmaps to validate
 *
 * Return true on successful validation, else false. At least one queue must
 * be requested and no bit at or above ICE_MAX_RSS_QS_PER_VF may be set.
 */
2498static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
2499{
2500 if ((!vqs->rx_queues && !vqs->tx_queues) ||
2501 vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
2502 vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
2503 return false;
2504
2505 return true;
2506}
/**
 * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
 * @vsi: VSI of the VF to configure
 * @q_idx: VF queue index used to determine the queue in the PF's space
 */
2513static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
2514{
2515 struct ice_hw *hw = &vsi->back->hw;
2516 u32 pfq = vsi->txq_map[q_idx];
2517 u32 reg;
2518
2519 reg = rd32(hw, QINT_TQCTL(pfq));

	/* MSI-X index 0 in the VF's space is reserved for the OICR, so a zero
	 * MSIX_INDX value means no traffic vector is mapped to this queue and
	 * the interrupt cause must not be enabled.
	 */
2525 if (!(reg & QINT_TQCTL_MSIX_INDX_M))
2526 return;
2527
2528 wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
2529}
/**
 * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
 * @vsi: VSI of the VF to configure
 * @q_idx: VF queue index used to determine the queue in the PF's space
 */
2536static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
2537{
2538 struct ice_hw *hw = &vsi->back->hw;
2539 u32 pfq = vsi->rxq_map[q_idx];
2540 u32 reg;
2541
2542 reg = rd32(hw, QINT_RQCTL(pfq));
2543
2544
2545
2546
2547
2548 if (!(reg & QINT_RQCTL_MSIX_INDX_M))
2549 return;
2550
2551 wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
2552}
/**
 * ice_vc_ena_qs_msg - handle the VF's enable queues request
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to enable all or specific queue(s)
 */
2561static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
2562{
2563 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2564 struct virtchnl_queue_select *vqs =
2565 (struct virtchnl_queue_select *)msg;
2566 struct ice_pf *pf = vf->pf;
2567 struct ice_vsi *vsi;
2568 unsigned long q_map;
2569 u16 vf_q_id;
2570
2571 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2572 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2573 goto error_param;
2574 }
2575
2576 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2577 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2578 goto error_param;
2579 }
2580
2581 if (!ice_vc_validate_vqs_bitmaps(vqs)) {
2582 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2583 goto error_param;
2584 }
2585
2586 vsi = pf->vsi[vf->lan_vsi_idx];
2587 if (!vsi) {
2588 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2589 goto error_param;
2590 }

	/* Enable only Rx rings, Tx rings were enabled by the FW when the
	 * Tx queue group list was configured and the context bits were
	 * programmed using ice_vsi_cfg_txqs
	 */
2596 q_map = vqs->rx_queues;
2597 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
2598 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2599 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2600 goto error_param;
2601 }
2602
2603
2604 if (test_bit(vf_q_id, vf->rxq_ena))
2605 continue;
2606
2607 if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
2608 dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
2609 vf_q_id, vsi->vsi_num);
2610 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2611 goto error_param;
2612 }
2613
2614 ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
2615 set_bit(vf_q_id, vf->rxq_ena);
2616 }
2617
2618 vsi = pf->vsi[vf->lan_vsi_idx];
2619 q_map = vqs->tx_queues;
2620 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
2621 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2622 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2623 goto error_param;
2624 }
2625
2626
2627 if (test_bit(vf_q_id, vf->txq_ena))
2628 continue;
2629
2630 ice_vf_ena_txq_interrupt(vsi, vf_q_id);
2631 set_bit(vf_q_id, vf->txq_ena);
2632 }
2633
2634
2635 if (v_ret == VIRTCHNL_STATUS_SUCCESS)
2636 set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
2637
2638error_param:
2639
2640 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
2641 NULL, 0);
2642}
2643
2644
2645
2646
2647
2648
2649
2650
2651
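/**
 * ice_vc_dis_qs_msg - disable VF queues
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Called from the VF to disable all or specific queue(s).
 */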
2652static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
2653{
2654 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2655 struct virtchnl_queue_select *vqs =
2656 (struct virtchnl_queue_select *)msg;
2657 struct ice_pf *pf = vf->pf;
2658 struct ice_vsi *vsi;
2659 unsigned long q_map;
2660 u16 vf_q_id;
2661
2662 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
2663 !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
2664 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2665 goto error_param;
2666 }
2667
2668 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2669 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2670 goto error_param;
2671 }
2672
2673 if (!ice_vc_validate_vqs_bitmaps(vqs)) {
2674 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2675 goto error_param;
2676 }
2677
2678 vsi = pf->vsi[vf->lan_vsi_idx];
2679 if (!vsi) {
2680 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2681 goto error_param;
2682 }
2683
2684 if (vqs->tx_queues) {
2685 q_map = vqs->tx_queues;
2686
2687 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
2688 struct ice_ring *ring = vsi->tx_rings[vf_q_id];
2689 struct ice_txq_meta txq_meta = { 0 };
2690
2691 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2692 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2693 goto error_param;
2694 }
2695
2696
2697 if (!test_bit(vf_q_id, vf->txq_ena))
2698 continue;
2699
2700 ice_fill_txq_meta(vsi, ring, &txq_meta);
2701
2702 if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
2703 ring, &txq_meta)) {
2704 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
2705 vf_q_id, vsi->vsi_num);
2706 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2707 goto error_param;
2708 }
2709
2710
2711 clear_bit(vf_q_id, vf->txq_ena);
2712 }
2713 }
2714
2715 q_map = vqs->rx_queues;
2716
2717 if (q_map &&
2718 bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
2719 if (ice_vsi_stop_all_rx_rings(vsi)) {
2720 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
2721 vsi->vsi_num);
2722 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2723 goto error_param;
2724 }
2725
2726 bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
2727 } else if (q_map) {
2728 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
2729 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2730 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2731 goto error_param;
2732 }
2733
2734
2735 if (!test_bit(vf_q_id, vf->rxq_ena))
2736 continue;
2737
2738 if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
2739 true)) {
2740 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
2741 vf_q_id, vsi->vsi_num);
2742 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2743 goto error_param;
2744 }
2745
2746
2747 clear_bit(vf_q_id, vf->rxq_ena);
2748 }
2749 }
2750
2751
2752 if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
2753 clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
2754
2755error_param:
2756
2757 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
2758 NULL, 0);
2759}
2760
2761
2762
2763
2764
2765
2766
2767
2768
2769
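/**
 * ice_cfg_interrupt - configure the IRQ to queue map for one vector
 * @vf: pointer to the VF info
 * @vsi: the VSI being configured
 * @vector_id: vector ID
 * @map: vector map for mapping vectors to queues
 * @q_vector: structure for the interrupt vector
 *
 * Returns VIRTCHNL_STATUS_SUCCESS on success, or VIRTCHNL_STATUS_ERR_PARAM
 * if any queue ID in the map is invalid.
 */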
2770static int
2771ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
2772 struct virtchnl_vector_map *map,
2773 struct ice_q_vector *q_vector)
2774{
2775 u16 vsi_q_id, vsi_q_id_idx;
2776 unsigned long qmap;
2777
2778 q_vector->num_ring_rx = 0;
2779 q_vector->num_ring_tx = 0;
2780
2781 qmap = map->rxq_map;
2782 for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
2783 vsi_q_id = vsi_q_id_idx;
2784
2785 if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
2786 return VIRTCHNL_STATUS_ERR_PARAM;
2787
2788 q_vector->num_ring_rx++;
2789 q_vector->rx.itr_idx = map->rxitr_idx;
2790 vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
2791 ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
2792 q_vector->rx.itr_idx);
2793 }
2794
2795 qmap = map->txq_map;
2796 for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
2797 vsi_q_id = vsi_q_id_idx;
2798
2799 if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
2800 return VIRTCHNL_STATUS_ERR_PARAM;
2801
2802 q_vector->num_ring_tx++;
2803 q_vector->tx.itr_idx = map->txitr_idx;
2804 vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
2805 ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
2806 q_vector->tx.itr_idx);
2807 }
2808
2809 return VIRTCHNL_STATUS_SUCCESS;
2810}
2811
2812
2813
2814
2815
2816
2817
2818
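/**
 * ice_vc_cfg_irq_map_msg - configure the VF's IRQ to queue map
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Called from the VF to configure the IRQ to queue map.
 */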
2819static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
2820{
2821 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2822 u16 num_q_vectors_mapped, vsi_id, vector_id;
2823 struct virtchnl_irq_map_info *irqmap_info;
2824 struct virtchnl_vector_map *map;
2825 struct ice_pf *pf = vf->pf;
2826 struct ice_vsi *vsi;
2827 int i;
2828
2829 irqmap_info = (struct virtchnl_irq_map_info *)msg;
2830 num_q_vectors_mapped = irqmap_info->num_vectors;
2831
2832
2833
2834
2835
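	/* Make sure the number of vectors being mapped does not exceed the
	 * number of MSI-X vectors allocated to the VF, and that at least one
	 * vector is being mapped.
	 */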
2836 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
2837 pf->num_msix_per_vf < num_q_vectors_mapped ||
2838 !num_q_vectors_mapped) {
2839 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2840 goto error_param;
2841 }
2842
2843 vsi = pf->vsi[vf->lan_vsi_idx];
2844 if (!vsi) {
2845 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2846 goto error_param;
2847 }
2848
2849 for (i = 0; i < num_q_vectors_mapped; i++) {
2850 struct ice_q_vector *q_vector;
2851
2852 map = &irqmap_info->vecmap[i];
2853
2854 vector_id = map->vector_id;
2855 vsi_id = map->vsi_id;
2856
2857
2858
2859 if (!(vector_id < pf->num_msix_per_vf) ||
2860 !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
2861 (!vector_id && (map->rxq_map || map->txq_map))) {
2862 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2863 goto error_param;
2864 }
2865
2866
2867 if (!vector_id)
2868 continue;
2869
2870
2871
2872
2873 q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
2874 if (!q_vector) {
2875 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2876 goto error_param;
2877 }
2878
2879
2880 v_ret = (enum virtchnl_status_code)
2881 ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
2882 if (v_ret)
2883 goto error_param;
2884 }
2885
2886error_param:
2887
2888 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
2889 NULL, 0);
2890}
2891
2892
2893
2894
2895
2896
2897
2898
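/**
 * ice_vc_cfg_qs_msg - configure VF queue(s)
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Called from the VF to configure its Rx/Tx queue(s).
 */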
2899static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
2900{
2901 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2902 struct virtchnl_vsi_queue_config_info *qci =
2903 (struct virtchnl_vsi_queue_config_info *)msg;
2904 struct virtchnl_queue_pair_info *qpi;
2905 u16 num_rxq = 0, num_txq = 0;
2906 struct ice_pf *pf = vf->pf;
2907 struct ice_vsi *vsi;
2908 int i;
2909
2910 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2911 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2912 goto error_param;
2913 }
2914
2915 if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
2916 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2917 goto error_param;
2918 }
2919
2920 vsi = pf->vsi[vf->lan_vsi_idx];
2921 if (!vsi) {
2922 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2923 goto error_param;
2924 }
2925
2926 if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
2927 qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
2928 dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
2929 vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
2930 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2931 goto error_param;
2932 }
2933
2934 for (i = 0; i < qci->num_queue_pairs; i++) {
2935 qpi = &qci->qpair[i];
2936 if (qpi->txq.vsi_id != qci->vsi_id ||
2937 qpi->rxq.vsi_id != qci->vsi_id ||
2938 qpi->rxq.queue_id != qpi->txq.queue_id ||
2939 qpi->txq.headwb_enabled ||
2940 !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
2941 !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
2942 !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
2943 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2944 goto error_param;
2945 }
2946
2947 if (qpi->txq.ring_len > 0) {
2948 num_txq++;
2949 vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
2950 vsi->tx_rings[i]->count = qpi->txq.ring_len;
2951 }
2952
2953
2954 if (qpi->rxq.ring_len > 0) {
2955 num_rxq++;
2956 vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
2957 vsi->rx_rings[i]->count = qpi->rxq.ring_len;
2958
2959 if (qpi->rxq.databuffer_size != 0 &&
2960 (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
2961 qpi->rxq.databuffer_size < 1024)) {
2962 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2963 goto error_param;
2964 }
2965 vsi->rx_buf_len = qpi->rxq.databuffer_size;
2966 vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
2967 if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
2968 qpi->rxq.max_pkt_size < 64) {
2969 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2970 goto error_param;
2971 }
2972 }
2973
2974 vsi->max_frame = qpi->rxq.max_pkt_size;
2975 }
2976
2977
2978
2979
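	/* The VF can request fewer queues than it was allocated, so update
	 * the VSI with the number of Tx/Rx queues actually configured.
	 */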
2980 vsi->num_txq = num_txq;
2981 vsi->num_rxq = num_rxq;
2982
2983 vsi->tc_cfg.tc_info[0].qcount_tx = num_txq;
2984 vsi->tc_cfg.tc_info[0].qcount_rx = num_rxq;
2985
2986 if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi))
2987 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2988
2989error_param:
2990
2991 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
2992 NULL, 0);
2993}
2994
2995
2996
2997
2998
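/**
 * ice_is_vf_trusted - check if the VF has been granted privileged capabilities
 * @vf: pointer to the VF info
 */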
2999static bool ice_is_vf_trusted(struct ice_vf *vf)
3000{
3001 return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
3002}
3003
3004
3005
3006
3007
3008
3009
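/**
 * ice_can_vf_change_mac - check if the VF may change its MAC filters
 * @vf: pointer to the VF info
 */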
3010static bool ice_can_vf_change_mac(struct ice_vf *vf)
3011{
3012
3013
3014
3015
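	/* If the host administrator has set the VF MAC address via
	 * ndo_set_vf_mac, deny the VF permission to add or delete unicast
	 * MAC addresses unless the VF is trusted.
	 */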
3016 if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
3017 return false;
3018
3019 return true;
3020}
3021
3022
3023
3024
3025
3026
3027
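/**
 * ice_vc_add_mac_addr - attempt to add the MAC address passed in
 * @vf: pointer to the VF info
 * @vsi: pointer to the VF's VSI
 * @mac_addr: MAC address to add
 */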
3028static int
3029ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
3030{
3031 struct device *dev = ice_pf_to_dev(vf->pf);
3032 enum ice_status status;
3033
3034
3035 if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3036 return 0;
3037
3038 if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
3039 dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
3040 return -EPERM;
3041 }
3042
3043 status = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
3044 if (status == ICE_ERR_ALREADY_EXISTS) {
3045 dev_err(dev, "MAC %pM already exists for VF %d\n", mac_addr,
3046 vf->vf_id);
3047 return -EEXIST;
3048 } else if (status) {
3049 dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %s\n",
3050 mac_addr, vf->vf_id, ice_stat_str(status));
3051 return -EIO;
3052 }
3053
3054
3055
3056
3057
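	/* Only a unicast MAC updates the default LAN address, which is the
	 * address reported to the host via ndo_get_vf_config.
	 */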
3058 if (is_unicast_ether_addr(mac_addr))
3059 ether_addr_copy(vf->dflt_lan_addr.addr, mac_addr);
3060
3061 vf->num_mac++;
3062
3063 return 0;
3064}
3065
3066
3067
3068
3069
3070
3071
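/**
 * ice_vc_del_mac_addr - attempt to delete the MAC address passed in
 * @vf: pointer to the VF info
 * @vsi: pointer to the VF's VSI
 * @mac_addr: MAC address to delete
 */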
3072static int
3073ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
3074{
3075 struct device *dev = ice_pf_to_dev(vf->pf);
3076 enum ice_status status;
3077
3078 if (!ice_can_vf_change_mac(vf) &&
3079 ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3080 return 0;
3081
3082 status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
3083 if (status == ICE_ERR_DOES_NOT_EXIST) {
3084 dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
3085 vf->vf_id);
3086 return -ENOENT;
3087 } else if (status) {
3088 dev_err(dev, "Failed to delete MAC %pM for VF %d, error %s\n",
3089 mac_addr, vf->vf_id, ice_stat_str(status));
3090 return -EIO;
3091 }
3092
3093 if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3094 eth_zero_addr(vf->dflt_lan_addr.addr);
3095
3096 vf->num_mac--;
3097
3098 return 0;
3099}
3100
3101
3102
3103
3104
3105
3106
3107
3108
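/**
 * ice_vc_handle_mac_addr_msg - add or delete VF MAC addresses
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @set: true if MAC filters are being set, false if being deleted
 */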
3109static int
3110ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
3111{
3112 int (*ice_vc_cfg_mac)
3113 (struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr);
3114 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3115 struct virtchnl_ether_addr_list *al =
3116 (struct virtchnl_ether_addr_list *)msg;
3117 struct ice_pf *pf = vf->pf;
3118 enum virtchnl_ops vc_op;
3119 struct ice_vsi *vsi;
3120 int i;
3121
3122 if (set) {
3123 vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
3124 ice_vc_cfg_mac = ice_vc_add_mac_addr;
3125 } else {
3126 vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
3127 ice_vc_cfg_mac = ice_vc_del_mac_addr;
3128 }
3129
3130 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
3131 !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
3132 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3133 goto handle_mac_exit;
3134 }
3135
3136
3137
3138
3139
3140 if (set && !ice_is_vf_trusted(vf) &&
3141 (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
3142 dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
3143 vf->vf_id);
3144 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3145 goto handle_mac_exit;
3146 }
3147
3148 vsi = pf->vsi[vf->lan_vsi_idx];
3149 if (!vsi) {
3150 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3151 goto handle_mac_exit;
3152 }
3153
3154 for (i = 0; i < al->num_elements; i++) {
3155 u8 *mac_addr = al->list[i].addr;
3156 int result;
3157
3158 if (is_broadcast_ether_addr(mac_addr) ||
3159 is_zero_ether_addr(mac_addr))
3160 continue;
3161
3162 result = ice_vc_cfg_mac(vf, vsi, mac_addr);
3163 if (result == -EEXIST || result == -ENOENT) {
3164 continue;
3165 } else if (result) {
3166 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
3167 goto handle_mac_exit;
3168 }
3169 }
3170
3171handle_mac_exit:
3172
3173 return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
3174}
3175
3176
3177
3178
3179
3180
3181
3182
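/**
 * ice_vc_add_mac_addr_msg - add one or more MAC addresses for the VF
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 */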
3183static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
3184{
3185 return ice_vc_handle_mac_addr_msg(vf, msg, true);
3186}
3187
3188
3189
3190
3191
3192
3193
3194
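/**
 * ice_vc_del_mac_addr_msg - remove one or more MAC addresses for the VF
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 */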
3195static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
3196{
3197 return ice_vc_handle_mac_addr_msg(vf, msg, false);
3198}
3199
3200
3201
3202
3203
3204
3205
3206
3207
3208
3209
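/**
 * ice_vc_request_qs_msg - handle a VF request to change its number of queues
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * If the request can be honored, the VF is reset so the new queue count takes
 * effect; otherwise the response tells the VF how many queue pairs the PF can
 * actually provide.
 */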
3210static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
3211{
3212 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3213 struct virtchnl_vf_res_request *vfres =
3214 (struct virtchnl_vf_res_request *)msg;
3215 u16 req_queues = vfres->num_queue_pairs;
3216 struct ice_pf *pf = vf->pf;
3217 u16 max_allowed_vf_queues;
3218 u16 tx_rx_queue_left;
3219 struct device *dev;
3220 u16 cur_queues;
3221
3222 dev = ice_pf_to_dev(pf);
3223 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3224 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3225 goto error_param;
3226 }
3227
3228 cur_queues = vf->num_vf_qs;
3229 tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
3230 ice_get_avail_rxq_count(pf));
3231 max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
3232 if (!req_queues) {
3233 dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
3234 vf->vf_id);
3235 } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
3236 dev_err(dev, "VF %d tried to request more than %d queues.\n",
3237 vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
3238 vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
3239 } else if (req_queues > cur_queues &&
3240 req_queues - cur_queues > tx_rx_queue_left) {
3241 dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
3242 vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
3243 vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
3244 ICE_MAX_RSS_QS_PER_VF);
3245 } else {
3246
3247 vf->num_req_qs = req_queues;
3248 ice_vc_reset_vf(vf);
3249 dev_info(dev, "VF %d granted request of %u queues.\n",
3250 vf->vf_id, req_queues);
3251 return 0;
3252 }
3253
3254error_param:
3255
3256 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
3257 v_ret, (u8 *)vfres, sizeof(*vfres));
3258}
3259
3260
3261
3262
3263
3264
3265
3266
3267
3268
3269
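/**
 * ice_set_vf_port_vlan - set the port VLAN for a VF
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @vlan_id: VLAN ID being set
 * @qos: priority setting
 * @vlan_proto: VLAN protocol
 *
 * Returns 0 on success, negative error code otherwise.
 */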
3270int
3271ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
3272 __be16 vlan_proto)
3273{
3274 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3275 struct device *dev;
3276 struct ice_vf *vf;
3277 u16 vlanprio;
3278 int ret;
3279
3280 dev = ice_pf_to_dev(pf);
3281 if (ice_validate_vf_id(pf, vf_id))
3282 return -EINVAL;
3283
3284 if (vlan_id >= VLAN_N_VID || qos > 7) {
3285 dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
3286 vf_id, vlan_id, qos);
3287 return -EINVAL;
3288 }
3289
3290 if (vlan_proto != htons(ETH_P_8021Q)) {
3291 dev_err(dev, "VF VLAN protocol is not supported\n");
3292 return -EPROTONOSUPPORT;
3293 }
3294
3295 vf = &pf->vf[vf_id];
3296 ret = ice_check_vf_ready_for_cfg(vf);
3297 if (ret)
3298 return ret;
3299
3300 vlanprio = vlan_id | (qos << VLAN_PRIO_SHIFT);
3301
3302 if (vf->port_vlan_info == vlanprio) {
3303
3304 dev_dbg(dev, "Duplicate pvid %d request\n", vlanprio);
3305 return 0;
3306 }
3307
3308 vf->port_vlan_info = vlanprio;
3309
3310 if (vf->port_vlan_info)
3311 dev_info(dev, "Setting VLAN %d, QoS 0x%x on VF %d\n",
3312 vlan_id, qos, vf_id);
3313 else
3314 dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);
3315
3316 ice_vc_reset_vf(vf);
3317
3318 return 0;
3319}
3320
3321
3322
3323
3324
3325
3326
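/**
 * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
 * @caps: VF driver negotiated capabilities
 *
 * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else false.
 */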
3327static bool ice_vf_vlan_offload_ena(u32 caps)
3328{
3329 return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
3330}
3331
3332
3333
3334
3335
3336
3337
3338
3339
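/**
 * ice_vc_process_vlan_msg - add or remove VLAN(s) for the VF
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @add_v: true to add VLAN filters, false to remove them
 */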
3340static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
3341{
3342 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3343 struct virtchnl_vlan_filter_list *vfl =
3344 (struct virtchnl_vlan_filter_list *)msg;
3345 struct ice_pf *pf = vf->pf;
3346 bool vlan_promisc = false;
3347 struct ice_vsi *vsi;
3348 struct device *dev;
3349 struct ice_hw *hw;
3350 int status = 0;
3351 u8 promisc_m;
3352 int i;
3353
3354 dev = ice_pf_to_dev(pf);
3355 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3356 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3357 goto error_param;
3358 }
3359
3360 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
3361 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3362 goto error_param;
3363 }
3364
3365 if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
3366 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3367 goto error_param;
3368 }
3369
3370 for (i = 0; i < vfl->num_elements; i++) {
3371 if (vfl->vlan_id[i] >= VLAN_N_VID) {
3372 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3373 dev_err(dev, "invalid VF VLAN id %d\n",
3374 vfl->vlan_id[i]);
3375 goto error_param;
3376 }
3377 }
3378
3379 hw = &pf->hw;
3380 vsi = pf->vsi[vf->lan_vsi_idx];
3381 if (!vsi) {
3382 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3383 goto error_param;
3384 }
3385
3386 if (add_v && !ice_is_vf_trusted(vf) &&
3387 vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
3388 dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
3389 vf->vf_id);
3390
3391
3392
3393 goto error_param;
3394 }
3395
3396 if (vsi->info.pvid) {
3397 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3398 goto error_param;
3399 }
3400
3401 if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
3402 test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
3403 test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags))
3404 vlan_promisc = true;
3405
3406 if (add_v) {
3407 for (i = 0; i < vfl->num_elements; i++) {
3408 u16 vid = vfl->vlan_id[i];
3409
3410 if (!ice_is_vf_trusted(vf) &&
3411 vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
3412 dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
3413 vf->vf_id);
3414
3415
3416
3417
3418 goto error_param;
3419 }
3420
3421
3422
3423
3424
3425 if (!vid)
3426 continue;
3427
3428 status = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
3429 if (status) {
3430 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3431 goto error_param;
3432 }
3433
3434
3435 if (!vlan_promisc && vid &&
3436 !ice_vsi_is_vlan_pruning_ena(vsi)) {
3437 status = ice_cfg_vlan_pruning(vsi, true, false);
3438 if (status) {
3439 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3440 dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
3441 vid, status);
3442 goto error_param;
3443 }
3444 } else if (vlan_promisc) {
3445
3446 promisc_m = ICE_PROMISC_VLAN_TX |
3447 ICE_PROMISC_VLAN_RX;
3448
3449 status = ice_set_vsi_promisc(hw, vsi->idx,
3450 promisc_m, vid);
3451 if (status) {
3452 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3453 dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
3454 vid, status);
3455 }
3456 }
3457 }
3458 } else {
3459
3460
3461
3462
3463
3464
3465
3466 int num_vf_vlan;
3467
3468 num_vf_vlan = vsi->num_vlan;
3469 for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
3470 u16 vid = vfl->vlan_id[i];
3471
3472
3473
3474
3475
3476 if (!vid)
3477 continue;
3478
3479
3480
3481
3482 status = ice_vsi_kill_vlan(vsi, vid);
3483 if (status) {
3484 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3485 goto error_param;
3486 }
3487
3488
3489 if (vsi->num_vlan == 1 &&
3490 ice_vsi_is_vlan_pruning_ena(vsi))
3491 ice_cfg_vlan_pruning(vsi, false, false);
3492
3493
3494 if (vlan_promisc) {
3495 promisc_m = ICE_PROMISC_VLAN_TX |
3496 ICE_PROMISC_VLAN_RX;
3497
3498 ice_clear_vsi_promisc(hw, vsi->idx,
3499 promisc_m, vid);
3500 }
3501 }
3502 }
3503
3504error_param:
3505
3506 if (add_v)
3507 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
3508 NULL, 0);
3509 else
3510 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
3511 NULL, 0);
3512}
3513
3514
3515
3516
3517
3518
3519
3520
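/**
 * ice_vc_add_vlan_msg - add one or more VLAN(s) for the VF
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 */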
3521static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
3522{
3523 return ice_vc_process_vlan_msg(vf, msg, true);
3524}
3525
3526
3527
3528
3529
3530
3531
3532
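/**
 * ice_vc_remove_vlan_msg - remove one or more VLAN(s) for the VF
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 */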
3533static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
3534{
3535 return ice_vc_process_vlan_msg(vf, msg, false);
3536}
3537
3538
3539
3540
3541
3542
3543
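/**
 * ice_vc_ena_vlan_stripping - enable VLAN stripping on the VF's VSI
 * @vf: pointer to the VF info
 */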
3544static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
3545{
3546 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3547 struct ice_pf *pf = vf->pf;
3548 struct ice_vsi *vsi;
3549
3550 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3551 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3552 goto error_param;
3553 }
3554
3555 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
3556 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3557 goto error_param;
3558 }
3559
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (ice_vsi_manage_vlan_stripping(vsi, true))
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3563
3564error_param:
3565 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
3566 v_ret, NULL, 0);
3567}
3568
3569
3570
3571
3572
3573
3574
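/**
 * ice_vc_dis_vlan_stripping - disable VLAN stripping on the VF's VSI
 * @vf: pointer to the VF info
 */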
3575static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
3576{
3577 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3578 struct ice_pf *pf = vf->pf;
3579 struct ice_vsi *vsi;
3580
3581 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3582 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3583 goto error_param;
3584 }
3585
3586 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
3587 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3588 goto error_param;
3589 }
3590
3591 vsi = pf->vsi[vf->lan_vsi_idx];
3592 if (!vsi) {
3593 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3594 goto error_param;
3595 }
3596
3597 if (ice_vsi_manage_vlan_stripping(vsi, false))
3598 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3599
3600error_param:
3601 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
3602 v_ret, NULL, 0);
3603}
3604
3605
3606
3607
3608
3609
3610
3611
3612
3613
3614
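/**
 * ice_vf_init_vlan_stripping - set initial VLAN stripping state for a VF
 * @vf: pointer to the VF info
 *
 * Stripping is left untouched if a port VLAN is configured; otherwise it is
 * enabled or disabled based on the VF driver's negotiated VLAN capability.
 */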
3615static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
3616{
3617 struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
3618
3619 if (!vsi)
3620 return -EINVAL;
3621
3622
3623 if (vsi->info.pvid)
3624 return 0;
3625
3626 if (ice_vf_vlan_offload_ena(vf->driver_caps))
3627 return ice_vsi_manage_vlan_stripping(vsi, true);
3628 else
3629 return ice_vsi_manage_vlan_stripping(vsi, false);
3630}
3631
3632
3633
3634
3635
3636
3637
3638
3639
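/**
 * ice_vc_process_vf_msg - process a request from a VF
 * @pf: pointer to the PF structure
 * @event: pointer to the admin queue receive event containing the VF message
 *
 * Validates the message and dispatches it to the matching virtchnl opcode
 * handler.
 */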
3640void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
3641{
3642 u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
3643 s16 vf_id = le16_to_cpu(event->desc.retval);
3644 u16 msglen = event->msg_len;
3645 u8 *msg = event->msg_buf;
3646 struct ice_vf *vf = NULL;
3647 struct device *dev;
3648 int err = 0;
3649
3650 dev = ice_pf_to_dev(pf);
3651 if (ice_validate_vf_id(pf, vf_id)) {
3652 err = -EINVAL;
3653 goto error_handler;
3654 }
3655
3656 vf = &pf->vf[vf_id];
3657
3658
3659 if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
3660 err = -EPERM;
3661 goto error_handler;
3662 }
3663
3664
3665 err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
3666 if (err) {
3667 if (err == VIRTCHNL_STATUS_ERR_PARAM)
3668 err = -EPERM;
3669 else
3670 err = -EINVAL;
3671 }
3672
3673error_handler:
3674 if (err) {
		/* vf is NULL here if VF ID validation failed above */
		if (vf)
			ice_vc_send_msg_to_vf(vf, v_opcode,
					      VIRTCHNL_STATUS_ERR_PARAM,
					      NULL, 0);
3677 dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
3678 vf_id, v_opcode, msglen, err);
3679 return;
3680 }
3681
3682 switch (v_opcode) {
3683 case VIRTCHNL_OP_VERSION:
3684 err = ice_vc_get_ver_msg(vf, msg);
3685 break;
3686 case VIRTCHNL_OP_GET_VF_RESOURCES:
3687 err = ice_vc_get_vf_res_msg(vf, msg);
3688 if (ice_vf_init_vlan_stripping(vf))
3689 dev_err(dev, "Failed to initialize VLAN stripping for VF %d\n",
3690 vf->vf_id);
3691 ice_vc_notify_vf_link_state(vf);
3692 break;
3693 case VIRTCHNL_OP_RESET_VF:
3694 ice_vc_reset_vf_msg(vf);
3695 break;
3696 case VIRTCHNL_OP_ADD_ETH_ADDR:
3697 err = ice_vc_add_mac_addr_msg(vf, msg);
3698 break;
3699 case VIRTCHNL_OP_DEL_ETH_ADDR:
3700 err = ice_vc_del_mac_addr_msg(vf, msg);
3701 break;
3702 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
3703 err = ice_vc_cfg_qs_msg(vf, msg);
3704 break;
3705 case VIRTCHNL_OP_ENABLE_QUEUES:
3706 err = ice_vc_ena_qs_msg(vf, msg);
3707 ice_vc_notify_vf_link_state(vf);
3708 break;
3709 case VIRTCHNL_OP_DISABLE_QUEUES:
3710 err = ice_vc_dis_qs_msg(vf, msg);
3711 break;
3712 case VIRTCHNL_OP_REQUEST_QUEUES:
3713 err = ice_vc_request_qs_msg(vf, msg);
3714 break;
3715 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
3716 err = ice_vc_cfg_irq_map_msg(vf, msg);
3717 break;
3718 case VIRTCHNL_OP_CONFIG_RSS_KEY:
3719 err = ice_vc_config_rss_key(vf, msg);
3720 break;
3721 case VIRTCHNL_OP_CONFIG_RSS_LUT:
3722 err = ice_vc_config_rss_lut(vf, msg);
3723 break;
3724 case VIRTCHNL_OP_GET_STATS:
3725 err = ice_vc_get_stats_msg(vf, msg);
3726 break;
3727 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
3728 err = ice_vc_cfg_promiscuous_mode_msg(vf, msg);
3729 break;
3730 case VIRTCHNL_OP_ADD_VLAN:
3731 err = ice_vc_add_vlan_msg(vf, msg);
3732 break;
3733 case VIRTCHNL_OP_DEL_VLAN:
3734 err = ice_vc_remove_vlan_msg(vf, msg);
3735 break;
3736 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
3737 err = ice_vc_ena_vlan_stripping(vf);
3738 break;
3739 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
3740 err = ice_vc_dis_vlan_stripping(vf);
3741 break;
3742 case VIRTCHNL_OP_UNKNOWN:
3743 default:
3744 dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
3745 vf_id);
3746 err = ice_vc_send_msg_to_vf(vf, v_opcode,
3747 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
3748 NULL, 0);
3749 break;
3750 }
3751 if (err) {
3752
3753
3754
3755 dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
3756 vf_id, v_opcode, err);
3757 }
3758}
3759
3760
3761
3762
3763
3764
3765
3766
3767
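/**
 * ice_get_vf_cfg - return the VF configuration via ndo_get_vf_config
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ivi: VF configuration structure to fill in
 */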
3768int
3769ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
3770{
3771 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3772 struct ice_vf *vf;
3773
3774 if (ice_validate_vf_id(pf, vf_id))
3775 return -EINVAL;
3776
3777 vf = &pf->vf[vf_id];
3778
3779 if (ice_check_vf_init(pf, vf))
3780 return -EBUSY;
3781
3782 ivi->vf = vf_id;
3783 ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr);
3784
3785
3786 ivi->vlan = vf->port_vlan_info & VLAN_VID_MASK;
3787 ivi->qos = (vf->port_vlan_info & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
3788
3789 ivi->trusted = vf->trusted;
3790 ivi->spoofchk = vf->spoofchk;
3791 if (!vf->link_forced)
3792 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
3793 else if (vf->link_up)
3794 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
3795 else
3796 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
3797 ivi->max_tx_rate = vf->tx_rate;
3798 ivi->min_tx_rate = 0;
3799 return 0;
3800}
3801
3802
3803
3804
3805
3806
3807
3808
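/**
 * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch
 * @pf: PF structure to search
 * @umac: unicast MAC to compare against existing MAC filter rules
 *
 * Return true if the MAC is found in the switch MAC filter list, else false.
 */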
3809static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac)
3810{
3811 struct ice_sw_recipe *mac_recipe_list =
3812 &pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC];
3813 struct ice_fltr_mgmt_list_entry *list_itr;
3814 struct list_head *rule_head;
3815 struct mutex *rule_lock;
3816
3817 rule_head = &mac_recipe_list->filt_rules;
3818 rule_lock = &mac_recipe_list->filt_rule_lock;
3819
3820 mutex_lock(rule_lock);
3821 list_for_each_entry(list_itr, rule_head, list_entry) {
3822 u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3823
3824 if (ether_addr_equal(existing_mac, umac)) {
3825 mutex_unlock(rule_lock);
3826 return true;
3827 }
3828 }
3829
3830 mutex_unlock(rule_lock);
3831
3832 return false;
3833}
3834
3835
3836
3837
3838
3839
3840
3841
3842
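/**
 * ice_set_vf_mac - .ndo_set_vf_mac handler to program the VF MAC address
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @mac: MAC address to set, or the zero address to clear the PF-set MAC
 *
 * Returns 0 on success, negative error code otherwise.
 */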
3843int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
3844{
3845 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3846 struct ice_vf *vf;
3847 int ret;
3848
3849 if (ice_validate_vf_id(pf, vf_id))
3850 return -EINVAL;
3851
3852 if (is_multicast_ether_addr(mac)) {
3853 netdev_err(netdev, "%pM not a valid unicast address\n", mac);
3854 return -EINVAL;
3855 }
3856
3857 vf = &pf->vf[vf_id];
3858
3859 if (ether_addr_equal(vf->dflt_lan_addr.addr, mac))
3860 return 0;
3861
3862 ret = ice_check_vf_ready_for_cfg(vf);
3863 if (ret)
3864 return ret;
3865
3866 if (ice_unicast_mac_exists(pf, mac)) {
3867 netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n",
3868 mac, vf_id, mac);
3869 return -EINVAL;
3870 }
3871
3872
3873
3874
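	/* Store the requested MAC as the VF's default LAN address; it takes
	 * effect when the VF is reset below.
	 */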
3875 ether_addr_copy(vf->dflt_lan_addr.addr, mac);
3876 if (is_zero_ether_addr(mac)) {
3877
3878 vf->pf_set_mac = false;
3879 netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
3880 vf->vf_id);
3881 } else {
3882
3883 vf->pf_set_mac = true;
3884 netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
3885 mac, vf_id);
3886 }
3887
3888 ice_vc_reset_vf(vf);
3889 return 0;
3890}
3891
3892
3893
3894
3895
3896
3897
3898
3899
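/**
 * ice_set_vf_trust - .ndo_set_vf_trust handler to change a VF's trust level
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @trusted: flag to indicate whether to trust the VF
 *
 * A change in trust level triggers a VF reset.
 */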
3900int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
3901{
3902 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3903 struct ice_vf *vf;
3904 int ret;
3905
3906 if (ice_validate_vf_id(pf, vf_id))
3907 return -EINVAL;
3908
3909 vf = &pf->vf[vf_id];
3910 ret = ice_check_vf_ready_for_cfg(vf);
3911 if (ret)
3912 return ret;
3913
3914
3915 if (trusted == vf->trusted)
3916 return 0;
3917
3918 vf->trusted = trusted;
3919 ice_vc_reset_vf(vf);
3920 dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
3921 vf_id, trusted ? "" : "un");
3922
3923 return 0;
3924}
3925
3926
3927
3928
3929
3930
3931
3932
3933
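/**
 * ice_set_vf_link_state - set the link state of a VF
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link_state: required link state (auto, enabled, or disabled)
 *
 * Sets the VF's link state irrespective of the physical link state.
 */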
3934int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
3935{
3936 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3937 struct ice_vf *vf;
3938 int ret;
3939
3940 if (ice_validate_vf_id(pf, vf_id))
3941 return -EINVAL;
3942
3943 vf = &pf->vf[vf_id];
3944 ret = ice_check_vf_ready_for_cfg(vf);
3945 if (ret)
3946 return ret;
3947
3948 switch (link_state) {
3949 case IFLA_VF_LINK_STATE_AUTO:
3950 vf->link_forced = false;
3951 break;
3952 case IFLA_VF_LINK_STATE_ENABLE:
3953 vf->link_forced = true;
3954 vf->link_up = true;
3955 break;
3956 case IFLA_VF_LINK_STATE_DISABLE:
3957 vf->link_forced = true;
3958 vf->link_up = false;
3959 break;
3960 default:
3961 return -EINVAL;
3962 }
3963
3964 ice_vc_notify_vf_link_state(vf);
3965
3966 return 0;
3967}
3968
3969
3970
3971
3972
3973
3974
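/**
 * ice_get_vf_stats - populate struct ifla_vf_stats for a VF
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @vf_stats: pointer to the OS memory to be filled in
 */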
3975int ice_get_vf_stats(struct net_device *netdev, int vf_id,
3976 struct ifla_vf_stats *vf_stats)
3977{
3978 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3979 struct ice_eth_stats *stats;
3980 struct ice_vsi *vsi;
3981 struct ice_vf *vf;
3982 int ret;
3983
3984 if (ice_validate_vf_id(pf, vf_id))
3985 return -EINVAL;
3986
3987 vf = &pf->vf[vf_id];
3988 ret = ice_check_vf_ready_for_cfg(vf);
3989 if (ret)
3990 return ret;
3991
3992 vsi = pf->vsi[vf->lan_vsi_idx];
3993 if (!vsi)
3994 return -EINVAL;
3995
3996 ice_update_eth_stats(vsi);
3997 stats = &vsi->eth_stats;
3998
3999 memset(vf_stats, 0, sizeof(*vf_stats));
4000
4001 vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
4002 stats->rx_multicast;
4003 vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
4004 stats->tx_multicast;
4005 vf_stats->rx_bytes = stats->rx_bytes;
4006 vf_stats->tx_bytes = stats->tx_bytes;
4007 vf_stats->broadcast = stats->rx_broadcast;
4008 vf_stats->multicast = stats->rx_multicast;
4009 vf_stats->rx_dropped = stats->rx_discards;
4010 vf_stats->tx_dropped = stats->tx_discards;
4011
4012 return 0;
4013}
4014
4015
4016
4017
4018
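/**
 * ice_print_vf_rx_mdd_event - print a VF Rx malicious driver detect event
 * @vf: pointer to the VF structure
 */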
4019void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
4020{
4021 struct ice_pf *pf = vf->pf;
4022 struct device *dev;
4023
4024 dev = ice_pf_to_dev(pf);
4025
4026 dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
4027 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
4028 vf->dflt_lan_addr.addr,
4029 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
4030 ? "on" : "off");
4031}
4032
4033
4034
4035
4036
4037
4038
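/**
 * ice_print_vfs_mdd_events - print malicious driver detect events for all VFs
 * @pf: pointer to the PF structure
 *
 * Only prints when the print-pending flag is set and at most once per second,
 * and only for VFs with events that have not been printed yet.
 */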
4039void ice_print_vfs_mdd_events(struct ice_pf *pf)
4040{
4041 struct device *dev = ice_pf_to_dev(pf);
4042 struct ice_hw *hw = &pf->hw;
4043 int i;
4044
4045
4046 if (!test_and_clear_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state))
4047 return;
4048
4049
4050 if (time_is_after_jiffies(pf->last_printed_mdd_jiffies + HZ * 1))
4051 return;
4052
4053 pf->last_printed_mdd_jiffies = jiffies;
4054
4055 ice_for_each_vf(pf, i) {
4056 struct ice_vf *vf = &pf->vf[i];
4057
4058
4059 if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
4060 vf->mdd_rx_events.last_printed =
4061 vf->mdd_rx_events.count;
4062 ice_print_vf_rx_mdd_event(vf);
4063 }
4064
4065
4066 if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
4067 vf->mdd_tx_events.last_printed =
4068 vf->mdd_tx_events.count;
4069
4070 dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
4071 vf->mdd_tx_events.count, hw->pf_id, i,
4072 vf->dflt_lan_addr.addr);
4073 }
4074 }
4075}
4076
4077
4078
4079
4080
4081
4082
4083
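/**
 * ice_restore_all_vfs_msi_state - restore MSI state for all VFs of a PF
 * @pdev: pointer to the PF's pci_dev structure
 *
 * Walks the VF devices that belong to this PF and restores their MSI state,
 * e.g. when recovering interrupt capability after a PF-level reset.
 */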
4084void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
4085{
4086 struct pci_dev *vfdev;
4087 u16 vf_id;
4088 int pos;
4089
4090 if (!pci_num_vf(pdev))
4091 return;
4092
4093 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
4094 if (pos) {
4095 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
4096 &vf_id);
4097 vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
4098 while (vfdev) {
4099 if (vfdev->is_virtfn && vfdev->physfn == pdev)
4100 pci_restore_msi_state(vfdev);
4101 vfdev = pci_get_device(pdev->vendor, vf_id,
4102 vfdev);
4103 }
4104 }
4105}
4106