1
2
3
4#include "ice.h"
5#include "ice_lib.h"
6
7
8
9
10
11static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
12{
13 switch (ice_err) {
14 case ICE_SUCCESS:
15 return VIRTCHNL_STATUS_SUCCESS;
16 case ICE_ERR_BAD_PTR:
17 case ICE_ERR_INVAL_SIZE:
18 case ICE_ERR_DEVICE_NOT_SUPPORTED:
19 case ICE_ERR_PARAM:
20 case ICE_ERR_CFG:
21 return VIRTCHNL_STATUS_ERR_PARAM;
22 case ICE_ERR_NO_MEMORY:
23 return VIRTCHNL_STATUS_ERR_NO_MEMORY;
24 case ICE_ERR_NOT_READY:
25 case ICE_ERR_RESET_FAILED:
26 case ICE_ERR_FW_API_VER:
27 case ICE_ERR_AQ_ERROR:
28 case ICE_ERR_AQ_TIMEOUT:
29 case ICE_ERR_AQ_FULL:
30 case ICE_ERR_AQ_NO_WORK:
31 case ICE_ERR_AQ_EMPTY:
32 return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
33 default:
34 return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
35 }
36}
37
38
39
40
41
42
43
44
45
46static void
47ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
48 enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
49{
50 struct ice_hw *hw = &pf->hw;
51 struct ice_vf *vf = pf->vf;
52 int i;
53
54 for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
55
56 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
57 !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
58 continue;
59
60
61
62
63 ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
64 msglen, NULL);
65 }
66}
67
68
69
70
71
72
73
74
75static void
76ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
77 int ice_link_speed, bool link_up)
78{
79 if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
80 pfe->event_data.link_event_adv.link_status = link_up;
81
82 pfe->event_data.link_event_adv.link_speed =
83 ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
84 } else {
85 pfe->event_data.link_event.link_status = link_up;
86
87 pfe->event_data.link_event.link_speed =
88 (enum virtchnl_link_speed)
89 ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
90 }
91}
92
93
94
95
96
97
98
99static void
100ice_set_pfe_link_forced(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
101 bool link_up)
102{
103 u16 link_speed;
104
105 if (link_up)
106 link_speed = ICE_AQ_LINK_SPEED_100GB;
107 else
108 link_speed = ICE_AQ_LINK_SPEED_UNKNOWN;
109
110 ice_set_pfe_link(vf, pfe, link_speed, link_up);
111}
112
113
114
115
116
117
118
119static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
120{
121 struct virtchnl_pf_event pfe = { 0 };
122 struct ice_link_status *ls;
123 struct ice_pf *pf = vf->pf;
124 struct ice_hw *hw;
125
126 hw = &pf->hw;
127 ls = &hw->port_info->phy.link_info;
128
129 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
130 pfe.severity = PF_EVENT_SEVERITY_INFO;
131
132 if (vf->link_forced)
133 ice_set_pfe_link_forced(vf, &pfe, vf->link_up);
134 else
135 ice_set_pfe_link(vf, &pfe, ls->link_speed, ls->link_info &
136 ICE_AQ_LINK_UP);
137
138 ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
139 VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
140 sizeof(pfe), NULL);
141}
142
143
144
145
146
/**
 * ice_free_vf_res - free the VF's VSI and interrupt resources
 * @vf: pointer to the VF structure
 */
static void ice_free_vf_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	int i, last_vector_idx;

	/* First, disable the VF's configuration API so nothing can access
	 * the VF's VSI after it is released below.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

	/* free the VSI and invalidate the cached indices/counters */
	if (vf->lan_vsi_idx) {
		ice_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_num = 0;
		vf->num_mac = 0;
	}

	last_vector_idx = vf->first_vector_idx + pf->num_vf_msix - 1;
	/* disable each of the VF's interrupt vectors so the VF starts in a
	 * known state on its next allocation
	 */
	for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
		wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
		ice_flush(&pf->hw);
	}

	/* drop any promiscuous-mode bookkeeping for this VF */
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
}
175
176
177
178
179
/**
 * ice_dis_vf_mappings - disable the VF's MSI-X vector and queue mappings
 * @vf: pointer to the VF structure
 *
 * Undoes what ice_ena_vf_mappings programmed: invalidates the vector
 * allocation, hands the vectors back to the PF, and clears the Tx/Rx
 * queue base registers.
 */
static void ice_dis_vf_mappings(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	int first, last, v;
	struct ice_hw *hw;

	hw = &pf->hw;
	vsi = pf->vsi[vf->lan_vsi_idx];

	/* invalidate the VF's vector allocation */
	wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);

	first = vf->first_vector_idx;
	last = first + pf->num_vf_msix - 1;
	for (v = first; v <= last; v++) {
		u32 reg;

		/* map each vector back to the PF function */
		reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
			GLINT_VECT2FUNC_IS_PF_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	/* clear the Tx queue base; only contiguous mapping is supported */
	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
	else
		dev_err(&pf->pdev->dev,
			"Scattered mode for VF Tx queues is not yet implemented\n");

	/* likewise for the Rx queue base */
	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
	else
		dev_err(&pf->pdev->dev,
			"Scattered mode for VF Rx queues is not yet implemented\n");
}
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231static int ice_sriov_free_msix_res(struct ice_pf *pf)
232{
233 struct ice_res_tracker *res;
234
235 if (!pf)
236 return -EINVAL;
237
238 res = pf->irq_tracker;
239 if (!res)
240 return -EINVAL;
241
242
243 if (pf->sriov_base_vector < res->num_entries) {
244 res->end = res->num_entries;
245 pf->num_avail_sw_msix +=
246 res->num_entries - pf->sriov_base_vector;
247 }
248
249 pf->sriov_base_vector = 0;
250
251 return 0;
252}
253
254
255
256
257
/**
 * ice_free_vfs - free all VFs and release their resources
 * @pf: pointer to the PF structure
 *
 * Stops VF rings, disables SR-IOV if no VFs are assigned to VMs, frees
 * per-VF resources and the VF array, and clears any pending VFLR status.
 */
void ice_free_vfs(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	int tmp, i;

	if (!pf->vf)
		return;

	/* serialize against other VF-disable paths */
	while (test_and_set_bit(__ICE_VF_DIS, pf->state))
		usleep_range(1000, 2000);

	/* stop rings of any VF that is still enabled */
	for (i = 0; i < pf->num_alloc_vfs; i++) {
		struct ice_vsi *vsi;

		if (!test_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states))
			continue;

		vsi = pf->vsi[pf->vf[i].lan_vsi_idx];
		/* stop rings without wait time */
		ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, i);
		ice_vsi_stop_rx_rings(vsi);

		clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states);
	}

	/* SR-IOV can only be disabled when no VF is assigned to a VM;
	 * otherwise the VM would lose its device out from under it
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");

	/* zero the public counters before tearing down, so nothing else
	 * iterates over VFs while they are being freed
	 */
	tmp = pf->num_alloc_vfs;
	pf->num_vf_qps = 0;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
			/* disable VF qp mappings */
			ice_dis_vf_mappings(&pf->vf[i]);

			/* interrupts must be reconfigured the next time this
			 * VF is allocated
			 */
			set_bit(ICE_VF_STATE_CFG_INTR, pf->vf[i].vf_states);
			ice_free_vf_res(&pf->vf[i]);
		}
	}

	if (ice_sriov_free_msix_res(pf))
		dev_err(&pf->pdev->dev,
			"Failed to free MSIX resources used by SR-IOV\n");

	devm_kfree(&pf->pdev->dev, pf->vf);
	pf->vf = NULL;

	/* This check is for when the driver is unloaded while VFs are
	 * assigned. Setting the number of VFs to 0 through sysfs is caught
	 * before this function ever gets called.
	 */
	if (!pci_vfs_assigned(pf->pdev)) {
		int vf_id;

		/* Acknowledge VFLR for all VFs. Without this, VFs will fail
		 * to work correctly after being reloaded
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			u32 reg_idx, bit_idx;

			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}
	}
	clear_bit(__ICE_VF_DIS, pf->state);
	clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
}
339
340
341
342
343
344
345
346
347
348
349static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr)
350{
351 struct ice_pf *pf = vf->pf;
352 u32 reg, reg_idx, bit_idx;
353 struct ice_hw *hw;
354 int vf_abs_id, i;
355
356 hw = &pf->hw;
357 vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
358
359
360 clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
361
362
363
364
365
366
367
368 clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
369
370
371
372
373 wr32(hw, VF_MBX_ARQLEN(vf_abs_id), 0);
374
375
376
377
378 if (!is_vflr) {
379
380 reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
381 reg |= VPGEN_VFRTRIG_VFSWR_M;
382 wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
383 }
384
385 reg_idx = (vf_abs_id) / 32;
386 bit_idx = (vf_abs_id) % 32;
387 wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
388 ice_flush(hw);
389
390 wr32(hw, PF_PCI_CIAA,
391 VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
392 for (i = 0; i < 100; i++) {
393 reg = rd32(hw, PF_PCI_CIAD);
394 if ((reg & VF_TRANS_PENDING_M) != 0)
395 dev_err(&pf->pdev->dev,
396 "VF %d PCI transactions stuck\n", vf->vf_id);
397 udelay(1);
398 }
399}
400
401
402
403
404
405
406static void ice_vsi_set_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt, u16 vid)
407{
408 ctxt->info.vlan_flags = (ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
409 ICE_AQ_VSI_PVLAN_INSERT_PVID |
410 ICE_AQ_VSI_VLAN_EMOD_STR);
411 ctxt->info.pvid = cpu_to_le16(vid);
412 ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
413 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
414 ICE_AQ_VSI_PROP_SW_VALID);
415}
416
417
418
419
420
421static void ice_vsi_kill_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt)
422{
423 ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
424 ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
425 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
426 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
427 ICE_AQ_VSI_PROP_SW_VALID);
428}
429
430
431
432
433
434
435
436static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable)
437{
438 struct device *dev = &vsi->back->pdev->dev;
439 struct ice_hw *hw = &vsi->back->hw;
440 struct ice_vsi_ctx *ctxt;
441 enum ice_status status;
442 int ret = 0;
443
444 ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
445 if (!ctxt)
446 return -ENOMEM;
447
448 ctxt->info = vsi->info;
449 if (enable)
450 ice_vsi_set_pvid_fill_ctxt(ctxt, vid);
451 else
452 ice_vsi_kill_pvid_fill_ctxt(ctxt);
453
454 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
455 if (status) {
456 dev_info(dev, "update VSI for port VLAN failed, err %d aq_err %d\n",
457 status, hw->adminq.sq_last_status);
458 ret = -EIO;
459 goto out;
460 }
461
462 vsi->info = ctxt->info;
463out:
464 devm_kfree(dev, ctxt);
465 return ret;
466}
467
468
469
470
471
472
473
474
475
476
477static struct ice_vsi *
478ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id)
479{
480 return ice_vsi_setup(pf, pi, ICE_VSI_VF, vf_id);
481}
482
483
484
485
486
487
488
489
490
491
492
493static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
494{
495 return pf->hw.func_caps.common_cap.msix_vector_first_id +
496 pf->sriov_base_vector + vf->vf_id * pf->num_vf_msix;
497}
498
499
500
501
502
503
504
/**
 * ice_alloc_vsi_res - allocate a VSI and MAC filters for a VF
 * @vf: pointer to the VF structure
 *
 * Creates the VF's VSI, applies any configured port VLAN, and installs
 * the broadcast filter plus the VF's default unicast MAC (if set).
 *
 * Returns 0 on success, negative errno / filter status otherwise.
 */
static int ice_alloc_vsi_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	LIST_HEAD(tmp_add_list);
	u8 broadcast[ETH_ALEN];
	struct ice_vsi *vsi;
	int status = 0;

	/* first vector index is the VF's absolute MSI-X base */
	vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);

	vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id);
	if (!vsi) {
		dev_err(&pf->pdev->dev, "Failed to create VF VSI\n");
		return -ENOMEM;
	}

	vf->lan_vsi_idx = vsi->idx;
	vf->lan_vsi_num = vsi->vsi_num;

	/* apply the VF's configured port VLAN, if any */
	if (vf->port_vlan_id) {
		ice_vsi_manage_pvid(vsi, vf->port_vlan_id, true);
		ice_vsi_add_vlan(vsi, vf->port_vlan_id & ICE_VLAN_M);
	}

	eth_broadcast_addr(broadcast);

	status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
	if (status)
		goto ice_alloc_vsi_res_exit;

	/* add the VF's default unicast address when one is configured */
	if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) {
		status = ice_add_mac_to_list(vsi, &tmp_add_list,
					     vf->dflt_lan_addr.addr);
		if (status)
			goto ice_alloc_vsi_res_exit;
	}

	status = ice_add_mac(&pf->hw, &tmp_add_list);
	if (status)
		dev_err(&pf->pdev->dev, "could not add mac filters\n");

	/* Clear this bit after VF initialization since we shouldn't
	 * reallocate and reassign interrupts for this VF until it is done.
	 */
	clear_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states);
ice_alloc_vsi_res_exit:
	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
	return status;
}
559
560
561
562
563
/**
 * ice_alloc_vf_res - allocate all VF resources (VSI, queues, capabilities)
 * @vf: pointer to the VF structure
 *
 * Returns 0 on success; frees the VF's resources and returns the error
 * otherwise.
 */
static int ice_alloc_vf_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	int tx_rx_queue_left;
	int status;

	/* allocate the VSI first; it provides the queue resources */
	status = ice_alloc_vsi_res(vf);
	if (status)
		goto ice_alloc_vf_res_exit;

	/* honor a pending queue-count request from the VF if it fits into
	 * the queues still available on this PF
	 */
	tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx);
	tx_rx_queue_left += ICE_DFLT_QS_PER_VF;
	if (vf->num_req_qs && vf->num_req_qs <= tx_rx_queue_left &&
	    vf->num_req_qs != vf->num_vf_qs)
		vf->num_vf_qs = vf->num_req_qs;

	/* propagate the trust setting into the VF capability bits */
	if (vf->trusted)
		set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* VF resources are allocated; it may now use its configuration API */
	set_bit(ICE_VF_STATE_INIT, vf->vf_states);

	return status;

ice_alloc_vf_res_exit:
	ice_free_vf_res(vf);
	return status;
}
598
599
600
601
602
603
604
605
606static void ice_ena_vf_mappings(struct ice_vf *vf)
607{
608 struct ice_pf *pf = vf->pf;
609 struct ice_vsi *vsi;
610 int first, last, v;
611 struct ice_hw *hw;
612 int abs_vf_id;
613 u32 reg;
614
615 hw = &pf->hw;
616 vsi = pf->vsi[vf->lan_vsi_idx];
617 first = vf->first_vector_idx;
618 last = (first + pf->num_vf_msix) - 1;
619 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
620
621
622 reg = (((first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) |
623 ((last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) |
624 VPINT_ALLOC_VALID_M);
625 wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
626
627 reg = (((first << VPINT_ALLOC_PCI_FIRST_S) & VPINT_ALLOC_PCI_FIRST_M) |
628 ((last << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) |
629 VPINT_ALLOC_PCI_VALID_M);
630 wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
631
632 for (v = first; v <= last; v++) {
633 reg = (((abs_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
634 GLINT_VECT2FUNC_VF_NUM_M) |
635 ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
636 GLINT_VECT2FUNC_PF_NUM_M));
637 wr32(hw, GLINT_VECT2FUNC(v), reg);
638 }
639
640
641
642
643 wr32(hw, VPINT_MBX_CTL(abs_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M | 0);
644
645 wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
646
647
648 if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
649
650
651
652
653 reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
654 VPLAN_TX_QBASE_VFFIRSTQ_M) |
655 (((vsi->alloc_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
656 VPLAN_TX_QBASE_VFNUMQ_M));
657 wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
658 } else {
659 dev_err(&pf->pdev->dev,
660 "Scattered mode for VF Tx queues is not yet implemented\n");
661 }
662
663
664 wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);
665
666
667 if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
668
669
670
671
672 reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
673 VPLAN_RX_QBASE_VFFIRSTQ_M) |
674 (((vsi->alloc_txq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
675 VPLAN_RX_QBASE_VFNUMQ_M));
676 wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
677 } else {
678 dev_err(&pf->pdev->dev,
679 "Scattered mode for VF Rx queues is not yet implemented\n");
680 }
681}
682
683
684
685
686
687
688
689
690
691
692
693static int
694ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
695{
696 bool checked_min_res = false;
697 int res;
698
699
700
701
702
703
704
705
706 res = max_res;
707 while ((res >= min_res) && !checked_min_res) {
708 int num_all_res;
709
710 num_all_res = pf->num_alloc_vfs * res;
711 if (num_all_res <= avail_res)
712 return res;
713
714 if (res == min_res)
715 checked_min_res = true;
716
717 res = DIV_ROUND_UP(res, 2);
718 }
719 return 0;
720}
721
722
723
724
725
726
727int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
728{
729 struct ice_pf *pf;
730
731 if (!vf || !q_vector)
732 return -EINVAL;
733
734 pf = vf->pf;
735
736
737 return pf->sriov_base_vector + pf->num_vf_msix * vf->vf_id +
738 q_vector->v_idx + 1;
739}
740
741
742
743
744
745
746
747
748
749
750static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
751{
752 int i;
753
754 if (!res)
755 return -EINVAL;
756
757 for (i = res->num_entries - 1; i >= 0; i--)
758 if (res->list[i] & ICE_RES_VALID_BIT)
759 return i;
760
761 return 0;
762}
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
/**
 * ice_sriov_set_msix_res - carve MSI-X vectors out of the top of the space
 * @pf: pointer to the PF structure
 * @num_msix_needed: total vectors needed for all VFs
 *
 * Places the SR-IOV vector block at the end of the function's MSI-X space
 * and, when that block overlaps the software irq tracker, shrinks the
 * tracker so the PF stops handing out those vectors.
 *
 * Returns 0 on success, a negative error when the tracker is invalid or
 * the carved block would collide with vectors already in use.
 */
static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
{
	int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
	u16 pf_total_msix_vectors =
		pf->hw.func_caps.common_cap.num_msix_vectors;
	struct ice_res_tracker *res = pf->irq_tracker;
	int sriov_base_vector;

	if (max_valid_res_idx < 0)
		return max_valid_res_idx;

	/* VF vectors occupy the tail end of the MSI-X space */
	sriov_base_vector = pf_total_msix_vectors - num_msix_needed;

	/* make sure we only shrink the tracker, never steal vectors the PF
	 * has already allocated
	 */
	if (sriov_base_vector <= max_valid_res_idx)
		return -EINVAL;

	pf->sriov_base_vector = sriov_base_vector;

	/* when the carved block dips into the tracker's range, shrink it.
	 * NOTE(review): the num_avail_sw_msix adjustment assumes
	 * sriov_base_vector < res->num_entries whenever this branch is
	 * taken -- verify against how the tracker is sized elsewhere.
	 */
	if (num_msix_needed > (pf_total_msix_vectors - res->num_entries)) {
		pf->num_avail_sw_msix -=
			res->num_entries - pf->sriov_base_vector;
		res->end = pf->sriov_base_vector;
	}

	return 0;
}
812
813
814
815
816
817
818
819
820
/**
 * ice_check_avail_res - determine per-VF queue and vector budgets
 * @pf: pointer to the PF structure
 *
 * Based on the number of VFs being created, picks how many MSI-X vectors
 * and queue pairs each VF gets, carves the vectors out of the MSI-X
 * space, and records the results in pf->num_vf_qps / pf->num_vf_msix.
 *
 * Returns 0 on success, -EINVAL/-EIO when the request cannot be met.
 */
static int ice_check_avail_res(struct ice_pf *pf)
{
	int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
	u16 num_msix, num_txq, num_rxq, num_avail_msix;

	if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
		return -EINVAL;

	/* vectors above the highest one the PF has in use are available */
	num_avail_msix = pf->hw.func_caps.common_cap.num_msix_vectors -
		(max_valid_res_idx + 1);

	/* small VF counts get the maximum per-VF vector budget; larger
	 * counts fall back to the default budget
	 */
	if (pf->num_alloc_vfs <= 16) {
		num_msix = ice_determine_res(pf, num_avail_msix,
					     ICE_MAX_INTR_PER_VF,
					     ICE_MIN_INTR_PER_VF);
	} else if (pf->num_alloc_vfs <= ICE_MAX_VF_COUNT) {
		num_msix = ice_determine_res(pf, num_avail_msix,
					     ICE_DFLT_INTR_PER_VF,
					     ICE_MIN_INTR_PER_VF);
	} else {
		dev_err(&pf->pdev->dev,
			"Number of VFs %d exceeds max VF count %d\n",
			pf->num_alloc_vfs, ICE_MAX_VF_COUNT);
		return -EIO;
	}

	if (!num_msix)
		return -EIO;

	/* queue budgets are computed independently for Tx and Rx from the
	 * queues the PF has left
	 */
	num_txq = ice_determine_res(pf, pf->q_left_tx, ICE_DFLT_QS_PER_VF,
				    ICE_MIN_QS_PER_VF);

	num_rxq = ice_determine_res(pf, pf->q_left_rx, ICE_DFLT_QS_PER_VF,
				    ICE_MIN_QS_PER_VF);

	if (!num_txq || !num_rxq)
		return -EIO;

	if (ice_sriov_set_msix_res(pf, num_msix * pf->num_alloc_vfs))
		return -EINVAL;

	/* every VF gets the same symmetric queue-pair count */
	pf->num_vf_qps = min_t(int, num_txq, num_rxq);
	pf->num_vf_msix = num_msix;

	return 0;
}
894
895
896
897
898
899
900
901
902
903
/**
 * ice_cleanup_and_realloc_vf - finish a VF reset and re-allocate resources
 * @vf: pointer to the VF structure
 *
 * Releases the hardware reset trigger, rebuilds the VF's resources and
 * mappings, and finally tells the VF (via VFGEN_RSTAT) that it is active
 * again.
 */
static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_hw *hw;
	u32 reg;

	hw = &pf->hw;

	/* clear the reset trigger so hardware lets the VF out of reset */
	reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~VPGEN_VFRTRIG_VFSWR_M;
	wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);

	/* rebuild resources; only mark the VF active if that succeeded */
	if (!ice_alloc_vf_res(vf)) {
		ice_ena_vf_mappings(vf);
		set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
		clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
		vf->num_vlan = 0;
	}

	/* Tell the VF driver the reset is done. This must be done last so
	 * the VF does not start using its resources before they exist.
	 */
	wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}
943
944
945
946
947
948
949
950
951
952
953
954static enum ice_status
955ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
956 bool rm_promisc)
957{
958 struct ice_pf *pf = vf->pf;
959 enum ice_status status = 0;
960 struct ice_hw *hw;
961
962 hw = &pf->hw;
963 if (vf->num_vlan) {
964 status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
965 rm_promisc);
966 } else if (vf->port_vlan_id) {
967 if (rm_promisc)
968 status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
969 vf->port_vlan_id);
970 else
971 status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
972 vf->port_vlan_id);
973 } else {
974 if (rm_promisc)
975 status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
976 0);
977 else
978 status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
979 0);
980 }
981
982 return status;
983}
984
985
986
987
988
989
990
991
992
993
994
995
996
/**
 * ice_reset_all_vfs - reset every allocated VF in one pass
 * @pf: pointer to the PF structure
 * @is_vflr: true if the resets were triggered by VFLR
 *
 * Triggers the hardware reset for all VFs, stops their rings, waits for
 * the resets to complete, frees and re-budgets resources, and rebuilds
 * each VF.
 *
 * Returns true on success, false if resets could not be started or the
 * new resource budget could not be met.
 */
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	int v, i;

	/* nothing to reset */
	if (!pf->num_alloc_vfs)
		return false;

	/* if another reset/disable is already in flight, do not start one */
	if (test_and_set_bit(__ICE_VF_DIS, pf->state))
		return false;

	/* kick off the hardware reset for every VF before waiting on any */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		ice_trigger_vf_reset(&pf->vf[v], is_vflr);

	for (v = 0; v < pf->num_alloc_vfs; v++) {
		struct ice_vsi *vsi;

		vf = &pf->vf[v];
		vsi = pf->vsi[vf->lan_vsi_idx];
		if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
			ice_vsi_stop_lan_tx_rings(vsi, ICE_VF_RESET, vf->vf_id);
			ice_vsi_stop_rx_rings(vsi);
			clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
		}
	}

	/* Poll VPGEN_VFRSTAT for every VF, at most 10 iterations of ~10-20ms
	 * each; `v` only advances while the current VF reports reset done,
	 * so all VFs share the same overall time budget.
	 */
	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
		usleep_range(10000, 20000);

		/* check each VF in turn; stop at the first not-done VF */
		while (v < pf->num_alloc_vfs) {
			u32 reg;

			vf = &pf->vf[v];
			reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
			if (!(reg & VPGEN_VFRSTAT_VFRD_M))
				break;

			/* this VF is done; move on to the next one */
			v++;
		}
	}

	/* warn (but continue) if not all VFs confirmed the reset in time */
	if (v < pf->num_alloc_vfs)
		dev_warn(&pf->pdev->dev, "VF reset check timeout\n");
	usleep_range(10000, 20000);

	/* free everything so the budgets below start from a clean slate */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		vf = &pf->vf[v];

		ice_free_vf_res(vf);

		/* queue count is recomputed by ice_check_avail_res below */
		vf->num_vf_qs = 0;
	}

	if (ice_sriov_free_msix_res(pf))
		dev_err(&pf->pdev->dev,
			"Failed to free MSIX resources used by SR-IOV\n");

	if (ice_check_avail_res(pf)) {
		dev_err(&pf->pdev->dev,
			"Cannot allocate VF resources, try with fewer number of VFs\n");
		return false;
	}

	/* rebuild each VF with the freshly computed per-VF budgets */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		vf = &pf->vf[v];

		vf->num_vf_qs = pf->num_vf_qps;
		dev_dbg(&pf->pdev->dev,
			"VF-id %d has %d queues configured\n",
			vf->vf_id, vf->num_vf_qs);
		ice_cleanup_and_realloc_vf(vf);
	}

	ice_flush(hw);
	clear_bit(__ICE_VF_DIS, pf->state);

	return true;
}
1099
1100
1101
1102
1103
1104
1105
1106
/**
 * ice_reset_vf - reset a single VF
 * @vf: pointer to the VF structure
 * @is_vflr: true if the reset was triggered by VFLR
 *
 * Triggers the hardware reset, stops the VF's rings, waits for the reset
 * to complete, clears promiscuous mode if it was enabled, and rebuilds
 * the VF's resources.
 *
 * Returns true on completion, false if another reset was in progress.
 */
static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct ice_hw *hw;
	bool rsd = false;
	u8 promisc_m;
	u32 reg;
	int i;

	/* only one VF reset / disable operation at a time */
	if (test_and_set_bit(__ICE_VF_DIS, pf->state))
		return false;

	ice_trigger_vf_reset(vf, is_vflr);

	vsi = pf->vsi[vf->lan_vsi_idx];

	if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
		ice_vsi_stop_lan_tx_rings(vsi, ICE_VF_RESET, vf->vf_id);
		ice_vsi_stop_rx_rings(vsi);
		clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
	} else {
		/* rings were never started, but the Tx queues may still be
		 * configured in hardware - disable them explicitly
		 */
		ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
				NULL, ICE_VF_RESET, vf->vf_id, NULL);
	}

	hw = &pf->hw;

	/* poll VPGEN_VFRSTAT for up to ~10 * 10-20ms for reset completion */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully.
		 */
		usleep_range(10000, 20000);
		reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
		if (reg & VPGEN_VFRSTAT_VFRD_M) {
			rsd = true;
			break;
		}
	}

	/* warn but proceed; rebuilding is still the best recovery option */
	if (!rsd)
		dev_warn(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			 vf->vf_id);

	usleep_range(10000, 20000);

	/* disable any promiscuous configuration left over from before the
	 * reset, choosing the VLAN-aware bits when VLANs are involved
	 */
	if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
	    test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
		if (vf->port_vlan_id || vf->num_vlan)
			promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
		else
			promisc_m = ICE_UCAST_PROMISC_BITS;

		vsi = pf->vsi[vf->lan_vsi_idx];
		if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
			dev_err(&pf->pdev->dev, "disabling promiscuous mode failed\n");
	}

	/* free and then rebuild the VF's resources */
	ice_free_vf_res(vf);

	ice_cleanup_and_realloc_vf(vf);

	ice_flush(hw);
	clear_bit(__ICE_VF_DIS, pf->state);

	return true;
}
1190
1191
1192
1193
1194
1195void ice_vc_notify_link_state(struct ice_pf *pf)
1196{
1197 int i;
1198
1199 for (i = 0; i < pf->num_alloc_vfs; i++)
1200 ice_vc_notify_vf_link_state(&pf->vf[i]);
1201}
1202
1203
1204
1205
1206
1207
1208
1209void ice_vc_notify_reset(struct ice_pf *pf)
1210{
1211 struct virtchnl_pf_event pfe;
1212
1213 if (!pf->num_alloc_vfs)
1214 return;
1215
1216 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
1217 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
1218 ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
1219 (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
1220}
1221
1222
1223
1224
1225
1226static void ice_vc_notify_vf_reset(struct ice_vf *vf)
1227{
1228 struct virtchnl_pf_event pfe;
1229
1230
1231 if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
1232 return;
1233
1234
1235 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
1236 !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
1237 return;
1238
1239 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
1240 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
1241 ice_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
1242 VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
1243 NULL);
1244}
1245
1246
1247
1248
1249
1250
/**
 * ice_alloc_vfs - enable SR-IOV and allocate the VF array
 * @pf: pointer to the PF structure
 * @num_alloc_vfs: number of VFs to allocate
 *
 * Enables SR-IOV in PCI config space, allocates and initializes the
 * per-VF tracking structures, and resets all VFs to bring them up.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vfs;
	int i, ret;

	/* disable the misc interrupt while reconfiguring */
	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
	     ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);

	ice_flush(hw);

	ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
	if (ret) {
		pf->num_alloc_vfs = 0;
		goto err_unroll_intr;
	}
	/* allocate the per-VF tracking array */
	vfs = devm_kcalloc(&pf->pdev->dev, num_alloc_vfs, sizeof(*vfs),
			   GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_pci_disable_sriov;
	}
	pf->vf = vfs;

	/* apply default settings for each VF being created */
	for (i = 0; i < num_alloc_vfs; i++) {
		vfs[i].pf = pf;
		vfs[i].vf_sw_id = pf->first_sw;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
		vfs[i].spoofchk = true;

		/* interrupts still need to be configured for this VF */
		set_bit(ICE_VF_STATE_CFG_INTR, vfs[i].vf_states);
	}
	pf->num_alloc_vfs = num_alloc_vfs;

	/* a full reset brings the VFs up with their resources allocated */
	if (!ice_reset_all_vfs(pf, true)) {
		ret = -EIO;
		goto err_unroll_sriov;
	}

	/* success path also re-enables the misc interrupt below */
	goto err_unroll_intr;

err_unroll_sriov:
	pf->vf = NULL;
	devm_kfree(&pf->pdev->dev, vfs);
	vfs = NULL;
	pf->num_alloc_vfs = 0;
err_pci_disable_sriov:
	pci_disable_sriov(pf->pdev);
err_unroll_intr:
	/* rearm interrupts here */
	ice_irq_dynamic_ena(hw, NULL, NULL);
	return ret;
}
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324static bool ice_pf_state_is_nominal(struct ice_pf *pf)
1325{
1326 DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };
1327
1328 if (!pf)
1329 return false;
1330
1331 bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);
1332 if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS))
1333 return false;
1334
1335 return true;
1336}
1337
1338
1339
1340
1341
1342
1343static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
1344{
1345 int pre_existing_vfs = pci_num_vf(pf->pdev);
1346 struct device *dev = &pf->pdev->dev;
1347 int err;
1348
1349 if (!ice_pf_state_is_nominal(pf)) {
1350 dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
1351 return -EBUSY;
1352 }
1353
1354 if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
1355 dev_err(dev, "This device is not capable of SR-IOV\n");
1356 return -ENODEV;
1357 }
1358
1359 if (pre_existing_vfs && pre_existing_vfs != num_vfs)
1360 ice_free_vfs(pf);
1361 else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
1362 return num_vfs;
1363
1364 if (num_vfs > pf->num_vfs_supported) {
1365 dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
1366 num_vfs, pf->num_vfs_supported);
1367 return -ENOTSUPP;
1368 }
1369
1370 dev_info(dev, "Allocating %d VFs\n", num_vfs);
1371 err = ice_alloc_vfs(pf, num_vfs);
1372 if (err) {
1373 dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
1374 return err;
1375 }
1376
1377 set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
1378 return num_vfs;
1379}
1380
1381
1382
1383
1384
1385
1386
1387
1388int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
1389{
1390 struct ice_pf *pf = pci_get_drvdata(pdev);
1391
1392 if (num_vfs)
1393 return ice_pci_sriov_ena(pf, num_vfs);
1394
1395 if (!pci_vfs_assigned(pdev)) {
1396 ice_free_vfs(pf);
1397 } else {
1398 dev_err(&pf->pdev->dev,
1399 "can't free VFs because some are assigned to VMs.\n");
1400 return -EBUSY;
1401 }
1402
1403 return 0;
1404}
1405
1406
1407
1408
1409
1410
1411
1412
1413void ice_process_vflr_event(struct ice_pf *pf)
1414{
1415 struct ice_hw *hw = &pf->hw;
1416 int vf_id;
1417 u32 reg;
1418
1419 if (!test_and_clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
1420 !pf->num_alloc_vfs)
1421 return;
1422
1423 for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
1424 struct ice_vf *vf = &pf->vf[vf_id];
1425 u32 reg_idx, bit_idx;
1426
1427 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
1428 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
1429
1430 reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
1431 if (reg & BIT(bit_idx))
1432
1433 ice_reset_vf(vf, true);
1434 }
1435}
1436
1437
1438
1439
1440
1441
1442
/**
 * ice_vc_dis_vf - disable a VF by notifying it and then resetting it
 * @vf: pointer to the VF structure
 *
 * The notification is sent first so the VF driver can prepare for the
 * reset that follows.
 */
static void ice_vc_dis_vf(struct ice_vf *vf)
{
	ice_vc_notify_vf_reset(vf);
	ice_reset_vf(vf, false);
}
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
/**
 * ice_vc_send_msg_to_vf - send a virtchnl response message to a VF
 * @vf: destination VF
 * @v_opcode: virtchnl opcode the message responds to
 * @v_retval: status code of the operation
 * @msg: message payload (may be NULL)
 * @msglen: payload length in bytes
 *
 * Tracks per-VF valid/invalid message counts; a VF exceeding the invalid
 * message threshold is disabled. Returns 0 on success, -EINVAL/-EIO on
 * failure.
 */
static int
ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
		      enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
	enum ice_status aq_ret;
	struct ice_pf *pf;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return -EINVAL;

	pf = vf->pf;

	/* a non-success retval counts against the VF's invalid-message
	 * budget; too many invalid messages disables the VF
	 */
	if (v_retval) {
		vf->num_inval_msgs++;
		dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
			 vf->vf_id, v_opcode, v_retval);
		if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
			dev_err(&pf->pdev->dev,
				"Number of invalid messages exceeded for VF %d\n",
				vf->vf_id);
			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
			set_bit(ICE_VF_STATE_DIS, vf->vf_states);
			return -EIO;
		}
	} else {
		vf->num_valid_msgs++;
		/* a valid message resets the invalid-message counter */
		vf->num_inval_msgs = 0;
	}

	aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "Unable to send the message to VF %d aq_err %d\n",
			 vf->vf_id, pf->hw.mailboxq.sq_last_status);
		return -EIO;
	}

	return 0;
}
1502
1503
1504
1505
1506
1507
1508
1509
1510static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
1511{
1512 struct virtchnl_version_info info = {
1513 VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
1514 };
1515
1516 vf->vf_ver = *(struct virtchnl_version_info *)msg;
1517
1518 if (VF_IS_V10(&vf->vf_ver))
1519 info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
1520
1521 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
1522 VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
1523 sizeof(struct virtchnl_version_info));
1524}
1525
1526
1527
1528
1529
1530
1531
1532
/**
 * ice_vc_get_vf_res_msg - respond to VIRTCHNL_OP_GET_VF_RESOURCES
 * @vf: requesting VF
 * @msg: message buffer; for 1.1+ VFs it holds the VF's capability flags
 *
 * Negotiates capabilities with the VF and replies with the VF's VSI,
 * queue, vector and RSS parameters. Returns the status of the send.
 */
static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vf_resource *vfres = NULL;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	int len = 0;
	int ret;

	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err;
	}

	len = sizeof(struct virtchnl_vf_resource);

	vfres = devm_kzalloc(&pf->pdev->dev, len, GFP_KERNEL);
	if (!vfres) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}
	/* 1.1+ VFs send their capability request in the message body;
	 * assume a legacy default set for 1.0 VFs
	 */
	if (VF_IS_V11(&vf->vf_ver))
		vf->driver_caps = *(u32 *)msg;
	else
		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
				  VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto err;
	}

	/* VLAN offload is only offered when no port VLAN is configured */
	if (!vsi->info.pvid)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;

	/* grant exactly one RSS flavor: PF-managed, AQ, or register-based */
	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
	} else {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
		else
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
	}

	/* echo back each remaining capability the VF asked for */
	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;

	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
		vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;

	vfres->num_vsis = 1;
	/* Tx and Rx queue are always paired for a VF VSI */
	vfres->num_queue_pairs = vsi->num_txq;
	vfres->max_vectors = pf->num_vf_msix;
	vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
	vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;

	vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
	vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
	vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
	ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
			vf->dflt_lan_addr.addr);

	set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);

err:
	/* send the response back to the VF */
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
				    (u8 *)vfres, len);

	devm_kfree(&pf->pdev->dev, vfres);
	return ret;
}
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634static void ice_vc_reset_vf_msg(struct ice_vf *vf)
1635{
1636 if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
1637 ice_reset_vf(vf, false);
1638}
1639
1640
1641
1642
1643
1644
1645
1646
1647static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
1648{
1649 int i;
1650
1651 ice_for_each_vsi(pf, i)
1652 if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
1653 return pf->vsi[i];
1654
1655 return NULL;
1656}
1657
1658
1659
1660
1661
1662
1663
1664
1665static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
1666{
1667 struct ice_pf *pf = vf->pf;
1668 struct ice_vsi *vsi;
1669
1670 vsi = ice_find_vsi_from_id(pf, vsi_id);
1671
1672 return (vsi && (vsi->vf_id == vf->vf_id));
1673}
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
1684{
1685 struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
1686
1687 return (vsi && (qid < vsi->alloc_txq));
1688}
1689
1690
1691
1692
1693
1694
1695
1696
1697static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
1698{
1699 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1700 struct virtchnl_rss_key *vrk =
1701 (struct virtchnl_rss_key *)msg;
1702 struct ice_pf *pf = vf->pf;
1703 struct ice_vsi *vsi = NULL;
1704
1705 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1706 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1707 goto error_param;
1708 }
1709
1710 if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
1711 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1712 goto error_param;
1713 }
1714
1715 vsi = pf->vsi[vf->lan_vsi_idx];
1716 if (!vsi) {
1717 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1718 goto error_param;
1719 }
1720
1721 if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
1722 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1723 goto error_param;
1724 }
1725
1726 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
1727 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1728 goto error_param;
1729 }
1730
1731 if (ice_set_rss(vsi, vrk->key, NULL, 0))
1732 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1733error_param:
1734 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
1735 NULL, 0);
1736}
1737
1738
1739
1740
1741
1742
1743
1744
1745static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
1746{
1747 struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
1748 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1749 struct ice_pf *pf = vf->pf;
1750 struct ice_vsi *vsi = NULL;
1751
1752 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1753 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1754 goto error_param;
1755 }
1756
1757 if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
1758 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1759 goto error_param;
1760 }
1761
1762 vsi = pf->vsi[vf->lan_vsi_idx];
1763 if (!vsi) {
1764 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1765 goto error_param;
1766 }
1767
1768 if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
1769 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1770 goto error_param;
1771 }
1772
1773 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
1774 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1775 goto error_param;
1776 }
1777
1778 if (ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
1779 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1780error_param:
1781 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
1782 NULL, 0);
1783}
1784
1785
1786
1787
1788
1789
1790
1791
1792static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
1793{
1794 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1795 struct virtchnl_queue_select *vqs =
1796 (struct virtchnl_queue_select *)msg;
1797 struct ice_pf *pf = vf->pf;
1798 struct ice_eth_stats stats;
1799 struct ice_vsi *vsi;
1800
1801 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1802 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1803 goto error_param;
1804 }
1805
1806 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1807 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1808 goto error_param;
1809 }
1810
1811 vsi = pf->vsi[vf->lan_vsi_idx];
1812 if (!vsi) {
1813 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1814 goto error_param;
1815 }
1816
1817 memset(&stats, 0, sizeof(struct ice_eth_stats));
1818 ice_update_eth_stats(vsi);
1819
1820 stats = vsi->eth_stats;
1821
1822error_param:
1823
1824 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
1825 (u8 *)&stats, sizeof(stats));
1826}
1827
1828
1829
1830
1831
1832
1833
1834
/**
 * ice_vc_ena_qs_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to enable all or specific queue(s)
 */
static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_queue_select *vqs =
		(struct virtchnl_queue_select *)msg;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	/* at least one queue map must be requested */
	if (!vqs->rx_queues && !vqs->tx_queues) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	/* Only Rx rings are started here; Tx queues are presumably made
	 * operational when the VF configures them — TODO confirm. The
	 * individual rx_queues/tx_queues bitmaps are not consulted; the
	 * whole VSI's Rx rings are started.
	 */
	if (ice_vsi_start_rx_rings(vsi))
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	/* Set flag to indicate that queues are enabled */
	if (v_ret == VIRTCHNL_STATUS_SUCCESS)
		set_bit(ICE_VF_STATE_ENA, vf->vf_states);

error_param:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
				     NULL, 0);
}
1880
1881
1882
1883
1884
1885
1886
1887
1888
/**
 * ice_vc_dis_qs_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to disable all or specific queue(s)
 */
static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_queue_select *vqs =
		(struct virtchnl_queue_select *)msg;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	/* allow disable when either ACTIVE or queues-enabled is set */
	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
	    !test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!vqs->rx_queues && !vqs->tx_queues) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	/* stop both directions; both stops are attempted even if the
	 * first fails, so the reply reflects any failure
	 */
	if (ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id)) {
		dev_err(&vsi->back->pdev->dev,
			"Failed to stop tx rings on VSI %d\n",
			vsi->vsi_num);
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	}

	if (ice_vsi_stop_rx_rings(vsi)) {
		dev_err(&vsi->back->pdev->dev,
			"Failed to stop rx rings on VSI %d\n",
			vsi->vsi_num);
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	}

	/* Clear enabled queues flag */
	if (v_ret == VIRTCHNL_STATUS_SUCCESS)
		clear_bit(ICE_VF_STATE_ENA, vf->vf_states);

error_param:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
				     NULL, 0);
}
1942
1943
1944
1945
1946
1947
1948
1949
/**
 * ice_vc_cfg_irq_map_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure the IRQ to queue map; walks the
 * per-vector maps the VF supplied and wires each listed Rx/Tx queue
 * to its q_vector, programming the HW interrupt cause registers
 */
static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_irq_map_info *irqmap_info;
	u16 vsi_id, vsi_q_id, vector_id;
	struct virtchnl_vector_map *map;
	struct ice_pf *pf = vf->pf;
	u16 num_q_vectors_mapped;
	struct ice_vsi *vsi;
	unsigned long qmap;
	int i;

	irqmap_info = (struct virtchnl_irq_map_info *)msg;
	num_q_vectors_mapped = irqmap_info->num_vectors;

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	/* Check to make sure number of VF vectors mapped is not greater
	 * than the number of VF vectors originally allocated, and there
	 * is actual work to map
	 */
	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
	    pf->num_vf_msix < num_q_vectors_mapped ||
	    !irqmap_info->num_vectors) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < num_q_vectors_mapped; i++) {
		struct ice_q_vector *q_vector;

		map = &irqmap_info->vecmap[i];

		vector_id = map->vector_id;
		vsi_id = map->vsi_id;
		/* reject an out-of-range vector, a foreign VSI, or queue
		 * maps attached to the misc (non-queue) vector 0
		 */
		if (!(vector_id < pf->hw.func_caps.common_cap
		    .num_msix_vectors) || !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
		    (!vector_id && (map->rxq_map || map->txq_map))) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

		/* vector 0 is the misc/mailbox vector; nothing to map */
		if (!vector_id)
			continue;

		/* NOTE(review): vector_id is only bounded by the device-wide
		 * MSI-X count above; it looks like it is not checked against
		 * this VSI's number of q_vectors, so a hostile VF could index
		 * past vsi->q_vectors[] — confirm and bound if so.
		 */
		q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
		if (!q_vector) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

		/* attach each requested Rx queue to this vector */
		qmap = map->rxq_map;
		q_vector->num_ring_rx = 0;
		for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
			if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}
			q_vector->num_ring_rx++;
			q_vector->rx.itr_idx = map->rxitr_idx;
			vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
			ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
					      q_vector->rx.itr_idx);
		}

		/* attach each requested Tx queue to this vector */
		qmap = map->txq_map;
		q_vector->num_ring_tx = 0;
		for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
			if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}
			q_vector->num_ring_tx++;
			q_vector->tx.itr_idx = map->txitr_idx;
			vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
			ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
					      q_vector->tx.itr_idx);
		}
	}

error_param:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
				     NULL, 0);
}
2045
2046
2047
2048
2049
2050
2051
2052
2053static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
2054{
2055 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2056 struct virtchnl_vsi_queue_config_info *qci =
2057 (struct virtchnl_vsi_queue_config_info *)msg;
2058 struct virtchnl_queue_pair_info *qpi;
2059 struct ice_pf *pf = vf->pf;
2060 struct ice_vsi *vsi;
2061 int i;
2062
2063 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2064 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2065 goto error_param;
2066 }
2067
2068 if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
2069 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2070 goto error_param;
2071 }
2072
2073 vsi = pf->vsi[vf->lan_vsi_idx];
2074 if (!vsi)
2075 goto error_param;
2076
2077 if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF) {
2078 dev_err(&pf->pdev->dev,
2079 "VF-%d requesting more than supported number of queues: %d\n",
2080 vf->vf_id, qci->num_queue_pairs);
2081 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2082 goto error_param;
2083 }
2084
2085 for (i = 0; i < qci->num_queue_pairs; i++) {
2086 qpi = &qci->qpair[i];
2087 if (qpi->txq.vsi_id != qci->vsi_id ||
2088 qpi->rxq.vsi_id != qci->vsi_id ||
2089 qpi->rxq.queue_id != qpi->txq.queue_id ||
2090 !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
2091 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2092 goto error_param;
2093 }
2094
2095 vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
2096 vsi->tx_rings[i]->count = qpi->txq.ring_len;
2097
2098 vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
2099 vsi->rx_rings[i]->count = qpi->rxq.ring_len;
2100 if (qpi->rxq.databuffer_size > ((16 * 1024) - 128)) {
2101 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2102 goto error_param;
2103 }
2104 vsi->rx_buf_len = qpi->rxq.databuffer_size;
2105 if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
2106 qpi->rxq.max_pkt_size < 64) {
2107 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2108 goto error_param;
2109 }
2110 vsi->max_frame = qpi->rxq.max_pkt_size;
2111 }
2112
2113
2114
2115
2116 vsi->num_txq = qci->num_queue_pairs;
2117 vsi->num_rxq = qci->num_queue_pairs;
2118
2119 vsi->tc_cfg.tc_info[0].qcount_tx = qci->num_queue_pairs;
2120 vsi->tc_cfg.tc_info[0].qcount_rx = qci->num_queue_pairs;
2121
2122 if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi))
2123 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2124
2125error_param:
2126
2127 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
2128 NULL, 0);
2129}
2130
2131
2132
2133
2134
2135static bool ice_is_vf_trusted(struct ice_vf *vf)
2136{
2137 return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
2138}
2139
2140
2141
2142
2143
2144
2145
2146static bool ice_can_vf_change_mac(struct ice_vf *vf)
2147{
2148
2149
2150
2151
2152 if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
2153 return false;
2154
2155 return true;
2156}
2157
2158
2159
2160
2161
2162
2163
2164
2165
/**
 * ice_vc_handle_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @set: true if MAC filters are being added, false if being deleted
 *
 * add or remove guest MAC address filters for the VF's VSI
 */
static int
ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_ether_addr_list *al =
		(struct virtchnl_ether_addr_list *)msg;
	struct ice_pf *pf = vf->pf;
	enum virtchnl_ops vc_op;
	LIST_HEAD(mac_list);
	struct ice_vsi *vsi;
	int mac_count = 0;
	int i;

	if (set)
		vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
	else
		vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
	    !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto handle_mac_exit;
	}

	/* an untrusted VF is limited in how many MAC filters it may own */
	if (set && !ice_is_vf_trusted(vf) &&
	    (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
		dev_err(&pf->pdev->dev,
			"Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
			vf->vf_id);
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto handle_mac_exit;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto handle_mac_exit;
	}

	/* validate each requested address and collect the acceptable ones
	 * into mac_list for a single batched filter update below
	 */
	for (i = 0; i < al->num_elements; i++) {
		u8 *maddr = al->list[i].addr;

		if (ether_addr_equal(maddr, vf->dflt_lan_addr.addr) ||
		    is_broadcast_ether_addr(maddr)) {
			if (set) {
				/* default and broadcast addresses are
				 * already programmed; skip silently
				 */
				dev_info(&pf->pdev->dev,
					 "MAC %pM already set for VF %d\n",
					 maddr, vf->vf_id);
				continue;
			} else {
				/* the default/broadcast address may never
				 * be removed by the VF
				 */
				dev_err(&pf->pdev->dev,
					"VF can't remove default MAC address or MAC %pM programmed by PF for VF %d\n",
					maddr, vf->vf_id);
				continue;
			}
		}

		/* check for the invalid cases and bail if necessary */
		if (is_zero_ether_addr(maddr)) {
			dev_err(&pf->pdev->dev,
				"invalid MAC %pM provided for VF %d\n",
				maddr, vf->vf_id);
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto handle_mac_exit;
		}

		if (is_unicast_ether_addr(maddr) &&
		    !ice_can_vf_change_mac(vf)) {
			dev_err(&pf->pdev->dev,
				"can't change unicast MAC for untrusted VF %d\n",
				vf->vf_id);
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto handle_mac_exit;
		}

		/* get here if maddr is multicast or if VF can change MAC */
		if (ice_add_mac_to_list(vsi, &mac_list, al->list[i].addr)) {
			v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
			goto handle_mac_exit;
		}
		mac_count++;
	}

	/* program the collected filters in one switch-rule update */
	if (set)
		v_ret = ice_err_to_virt_err(ice_add_mac(&pf->hw, &mac_list));
	else
		v_ret = ice_err_to_virt_err(ice_remove_mac(&pf->hw, &mac_list));

	if (v_ret) {
		dev_err(&pf->pdev->dev,
			"can't update MAC filters for VF %d, error %d\n",
			vf->vf_id, v_ret);
	} else {
		/* keep the per-VF filter accounting in sync */
		if (set)
			vf->num_mac += mac_count;
		else
			vf->num_mac -= mac_count;
	}

handle_mac_exit:
	ice_free_fltr_list(&pf->pdev->dev, &mac_list);
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
}
2278
2279
2280
2281
2282
2283
2284
2285
/**
 * ice_vc_add_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * add guest MAC address filter
 */
static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
{
	return ice_vc_handle_mac_addr_msg(vf, msg, true);
}
2290
2291
2292
2293
2294
2295
2296
2297
/**
 * ice_vc_del_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * remove guest MAC address filter
 */
static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
{
	return ice_vc_handle_mac_addr_msg(vf, msg, false);
}
2302
2303
2304
2305
2306
2307
2308
2309
2310
2311
2312
/**
 * ice_vc_request_qs_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * VFs get a default number of queues but can use this message to request
 * a different number. If the request is successful, PF will reset the VF
 * and return 0. If unsuccessful, PF will send message informing VF of
 * number of available queue pairs via virtchnl message response to VF.
 */
static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vf_res_request *vfres =
		(struct virtchnl_vf_res_request *)msg;
	int req_queues = vfres->num_queue_pairs;
	struct ice_pf *pf = vf->pf;
	int max_allowed_vf_queues;
	int tx_rx_queue_left;
	int cur_queues;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	cur_queues = vf->num_vf_qs;
	/* queues must be available in both directions */
	tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx);
	max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
	if (req_queues <= 0) {
		/* nonsensical request: keep current count, reply as-is */
		dev_err(&pf->pdev->dev,
			"VF %d tried to request %d queues. Ignoring.\n",
			vf->vf_id, req_queues);
	} else if (req_queues > ICE_MAX_BASE_QS_PER_VF) {
		/* cap the advertised count at the per-VF maximum */
		dev_err(&pf->pdev->dev,
			"VF %d tried to request more than %d queues.\n",
			vf->vf_id, ICE_MAX_BASE_QS_PER_VF);
		vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF;
	} else if (req_queues - cur_queues > tx_rx_queue_left) {
		/* not enough free queues; advertise what is attainable */
		dev_warn(&pf->pdev->dev,
			 "VF %d requested %d more queues, but only %d left.\n",
			 vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
		vfres->num_queue_pairs = min_t(int, max_allowed_vf_queues,
					       ICE_MAX_BASE_QS_PER_VF);
	} else {
		/* request is valid, grant it; the VF is reset so the new
		 * queue count takes effect, and no reply is sent here
		 */
		vf->num_req_qs = req_queues;
		ice_vc_dis_vf(vf);
		dev_info(&pf->pdev->dev,
			 "VF %d granted request of %d queues.\n",
			 vf->vf_id, req_queues);
		return 0;
	}

error_param:
	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
				     v_ret, (u8 *)vfres, sizeof(*vfres));
}
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
/**
 * ice_set_vf_port_vlan
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @vlan_id: VLAN ID being set
 * @qos: priority setting
 * @vlan_proto: VLAN protocol
 *
 * program VF Port VLAN ID and/or QoS; .ndo_set_vf_vlan handler
 */
int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
		     __be16 vlan_proto)
{
	u16 vlanprio = vlan_id | (qos << ICE_VLAN_PRIORITY_S);
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id);
		return -EINVAL;
	}

	if (vlan_id > ICE_MAX_VLANID || qos > 7) {
		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
		return -EINVAL;
	}

	/* only 802.1Q port VLANs are supported */
	if (vlan_proto != htons(ETH_P_8021Q)) {
		dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
		return -EPROTONOSUPPORT;
	}

	vf = &pf->vf[vf_id];
	/* NOTE(review): vsi is fetched before the INIT-state check below;
	 * confirm lan_vsi_idx is always valid for an allocated VF even
	 * while it is mid-reset.
	 */
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
		return -EBUSY;
	}

	if (le16_to_cpu(vsi->info.pvid) == vlanprio) {
		/* duplicate request, so just return success */
		dev_info(&pf->pdev->dev,
			 "Duplicate pvid %d request\n", vlanprio);
		return ret;
	}

	/* remove any previously programmed port VLAN before replacing it */
	if (vsi->info.pvid)
		ice_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
				  VLAN_VID_MASK));

	if (vlan_id || qos) {
		ret = ice_vsi_manage_pvid(vsi, vlanprio, true);
		if (ret)
			goto error_set_pvid;
	} else {
		/* vlan_id 0 with qos 0 means remove the port VLAN */
		ice_vsi_manage_pvid(vsi, 0, false);
		vsi->info.pvid = 0;
	}

	if (vlan_id) {
		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
			 vlan_id, qos, vf_id);

		/* add new VLAN filter for each MAC */
		ret = ice_vsi_add_vlan(vsi, vlan_id);
		if (ret)
			goto error_set_pvid;
	}

	/* The Port VLAN needs to be saved across resets the same as the
	 * default LAN MAC address is stored.
	 */
	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);

error_set_pvid:
	return ret;
}
2446
2447
2448
2449
2450
2451
2452
2453
2454
/**
 * ice_vc_process_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @add_v: Add VLAN if true, otherwise delete VLAN
 *
 * Process virtchnl op to add or remove programmed guest VLAN ID(s)
 */
static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
{
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vlan_filter_list *vfl =
		(struct virtchnl_vlan_filter_list *)msg;
	struct ice_pf *pf = vf->pf;
	bool vlan_promisc = false;
	struct ice_vsi *vsi;
	struct ice_hw *hw;
	int status = 0;
	u8 promisc_m;
	int i;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (add_v && !ice_is_vf_trusted(vf) &&
	    vf->num_vlan >= ICE_MAX_VLAN_PER_VF) {
		dev_info(&pf->pdev->dev,
			 "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
			 vf->vf_id);
		/* NOTE: v_ret is deliberately left as SUCCESS here — the VF
		 * is told its request succeeded rather than being informed
		 * it is untrusted.
		 */
		goto error_param;
	}

	/* validate all requested VLAN IDs up front */
	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > ICE_MAX_VLANID) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			dev_err(&pf->pdev->dev,
				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
			goto error_param;
		}
	}

	hw = &pf->hw;
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	/* guest VLANs may not be manipulated while a port VLAN is set */
	if (vsi->info.pvid) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	if (ice_vsi_manage_vlan_stripping(vsi, add_v)) {
		dev_err(&pf->pdev->dev,
			"%sable VLAN stripping failed for VSI %i\n",
			add_v ? "en" : "dis", vsi->vsi_num);
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

	/* promiscuous VFs also need matching VLAN promisc rules */
	if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
	    test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
		vlan_promisc = true;

	if (add_v) {
		for (i = 0; i < vfl->num_elements; i++) {
			u16 vid = vfl->vlan_id[i];

			/* re-check the limit as VLANs are added */
			if (!ice_is_vf_trusted(vf) &&
			    vf->num_vlan >= ICE_MAX_VLAN_PER_VF) {
				dev_info(&pf->pdev->dev,
					 "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
					 vf->vf_id);
				/* as above, success is still reported */
				goto error_param;
			}

			if (ice_vsi_add_vlan(vsi, vid)) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}

			vf->num_vlan++;
			/* non-promiscuous VFs get VLAN pruning; promiscuous
			 * VFs instead get a per-VLAN promisc rule
			 */
			if (!vlan_promisc) {
				status = ice_cfg_vlan_pruning(vsi, true, false);
				if (status) {
					v_ret = VIRTCHNL_STATUS_ERR_PARAM;
					dev_err(&pf->pdev->dev,
						"Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
						vid, status);
					goto error_param;
				}
			} else {
				/* Enable VLAN pruning when VLAN is added */
				promisc_m = ICE_PROMISC_VLAN_TX |
					    ICE_PROMISC_VLAN_RX;

				status = ice_set_vsi_promisc(hw, vsi->idx,
							     promisc_m, vid);
				if (status) {
					v_ret = VIRTCHNL_STATUS_ERR_PARAM;
					dev_err(&pf->pdev->dev,
						"Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
						vid, status);
				}
			}
		}
	} else {
		/* In case of non_trusted VF, number of VLAN elements passed
		 * to PF for removal might be greater than number of VLANs
		 * filter programmed for that VF - So, use actual number of
		 * VLANS added earlier with add VLAN opcode. In order to avoid
		 * removing VLAN that doesn't exist, which result to sending
		 * erroneous failed message back to the VF
		 */
		int num_vf_vlan;

		num_vf_vlan = vf->num_vlan;
		for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
			u16 vid = vfl->vlan_id[i];

			/* Make sure ice_vsi_kill_vlan is successful before
			 * updating VLAN information
			 */
			if (ice_vsi_kill_vlan(vsi, vid)) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				goto error_param;
			}

			vf->num_vlan--;
			/* Disable VLAN pruning when removing VLAN */
			ice_cfg_vlan_pruning(vsi, false, false);

			/* Disable Unicast/Multicast VLAN promiscuous mode */
			if (vlan_promisc) {
				promisc_m = ICE_PROMISC_VLAN_TX |
					    ICE_PROMISC_VLAN_RX;

				ice_clear_vsi_promisc(hw, vsi->idx,
						      promisc_m, vid);
			}
		}
	}

error_param:
	/* send the response to the VF */
	if (add_v)
		return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
					     NULL, 0);
	else
		return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
					     NULL, 0);
}
2615
2616
2617
2618
2619
2620
2621
2622
/**
 * ice_vc_add_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Add and program guest VLAN ID
 */
static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
{
	return ice_vc_process_vlan_msg(vf, msg, true);
}
2627
2628
2629
2630
2631
2632
2633
2634
/**
 * ice_vc_remove_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * remove programmed guest VLAN ID
 */
static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
{
	return ice_vc_process_vlan_msg(vf, msg, false);
}
2639
2640
2641
2642
2643
2644
2645
2646static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
2647{
2648 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2649 struct ice_pf *pf = vf->pf;
2650 struct ice_vsi *vsi;
2651
2652 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2653 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2654 goto error_param;
2655 }
2656
2657 vsi = pf->vsi[vf->lan_vsi_idx];
2658 if (ice_vsi_manage_vlan_stripping(vsi, true))
2659 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2660
2661error_param:
2662 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
2663 v_ret, NULL, 0);
2664}
2665
2666
2667
2668
2669
2670
2671
2672static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
2673{
2674 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2675 struct ice_pf *pf = vf->pf;
2676 struct ice_vsi *vsi;
2677
2678 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2679 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2680 goto error_param;
2681 }
2682
2683 vsi = pf->vsi[vf->lan_vsi_idx];
2684 if (!vsi) {
2685 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2686 goto error_param;
2687 }
2688
2689 if (ice_vsi_manage_vlan_stripping(vsi, false))
2690 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2691
2692error_param:
2693 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
2694 v_ret, NULL, 0);
2695}
2696
2697
2698
2699
2700
2701
2702
2703
2704
2705void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
2706{
2707 u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
2708 s16 vf_id = le16_to_cpu(event->desc.retval);
2709 u16 msglen = event->msg_len;
2710 u8 *msg = event->msg_buf;
2711 struct ice_vf *vf = NULL;
2712 int err = 0;
2713
2714 if (vf_id >= pf->num_alloc_vfs) {
2715 err = -EINVAL;
2716 goto error_handler;
2717 }
2718
2719 vf = &pf->vf[vf_id];
2720
2721
2722 if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
2723 err = -EPERM;
2724 goto error_handler;
2725 }
2726
2727
2728 err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
2729 if (err) {
2730 if (err == VIRTCHNL_STATUS_ERR_PARAM)
2731 err = -EPERM;
2732 else
2733 err = -EINVAL;
2734 goto error_handler;
2735 }
2736
2737
2738 if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) {
2739 struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg;
2740
2741 if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE)
2742 err = -EINVAL;
2743 } else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) {
2744 struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
2745
2746 if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE)
2747 err = -EINVAL;
2748 }
2749
2750error_handler:
2751 if (err) {
2752 ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
2753 NULL, 0);
2754 dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
2755 vf_id, v_opcode, msglen, err);
2756 return;
2757 }
2758
2759 switch (v_opcode) {
2760 case VIRTCHNL_OP_VERSION:
2761 err = ice_vc_get_ver_msg(vf, msg);
2762 break;
2763 case VIRTCHNL_OP_GET_VF_RESOURCES:
2764 err = ice_vc_get_vf_res_msg(vf, msg);
2765 break;
2766 case VIRTCHNL_OP_RESET_VF:
2767 ice_vc_reset_vf_msg(vf);
2768 break;
2769 case VIRTCHNL_OP_ADD_ETH_ADDR:
2770 err = ice_vc_add_mac_addr_msg(vf, msg);
2771 break;
2772 case VIRTCHNL_OP_DEL_ETH_ADDR:
2773 err = ice_vc_del_mac_addr_msg(vf, msg);
2774 break;
2775 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
2776 err = ice_vc_cfg_qs_msg(vf, msg);
2777 break;
2778 case VIRTCHNL_OP_ENABLE_QUEUES:
2779 err = ice_vc_ena_qs_msg(vf, msg);
2780 ice_vc_notify_vf_link_state(vf);
2781 break;
2782 case VIRTCHNL_OP_DISABLE_QUEUES:
2783 err = ice_vc_dis_qs_msg(vf, msg);
2784 break;
2785 case VIRTCHNL_OP_REQUEST_QUEUES:
2786 err = ice_vc_request_qs_msg(vf, msg);
2787 break;
2788 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
2789 err = ice_vc_cfg_irq_map_msg(vf, msg);
2790 break;
2791 case VIRTCHNL_OP_CONFIG_RSS_KEY:
2792 err = ice_vc_config_rss_key(vf, msg);
2793 break;
2794 case VIRTCHNL_OP_CONFIG_RSS_LUT:
2795 err = ice_vc_config_rss_lut(vf, msg);
2796 break;
2797 case VIRTCHNL_OP_GET_STATS:
2798 err = ice_vc_get_stats_msg(vf, msg);
2799 break;
2800 case VIRTCHNL_OP_ADD_VLAN:
2801 err = ice_vc_add_vlan_msg(vf, msg);
2802 break;
2803 case VIRTCHNL_OP_DEL_VLAN:
2804 err = ice_vc_remove_vlan_msg(vf, msg);
2805 break;
2806 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
2807 err = ice_vc_ena_vlan_stripping(vf);
2808 break;
2809 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
2810 err = ice_vc_dis_vlan_stripping(vf);
2811 break;
2812 case VIRTCHNL_OP_UNKNOWN:
2813 default:
2814 dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
2815 v_opcode, vf_id);
2816 err = ice_vc_send_msg_to_vf(vf, v_opcode,
2817 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
2818 NULL, 0);
2819 break;
2820 }
2821 if (err) {
2822
2823
2824
2825 dev_info(&pf->pdev->dev,
2826 "PF failed to honor VF %d, opcode %d, error %d\n",
2827 vf_id, v_opcode, err);
2828 }
2829}
2830
2831
2832
2833
2834
2835
2836
2837
2838
/**
 * ice_get_vf_cfg
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ivi: VF configuration structure
 *
 * return VF configuration; .ndo_get_vf_config handler
 */
int
ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	struct ice_vf *vf;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		netdev_err(netdev, "invalid VF id: %d\n", vf_id);
		return -EINVAL;
	}

	vf = &pf->vf[vf_id];
	/* switch from the PF VSI to this VF's VSI */
	vsi = pf->vsi[vf->lan_vsi_idx];

	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
		netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
		return -EBUSY;
	}

	ivi->vf = vf_id;
	ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr);

	/* VF configuration for VLAN and applicable QoS */
	ivi->vlan = le16_to_cpu(vsi->info.pvid) & ICE_VLAN_M;
	ivi->qos = (le16_to_cpu(vsi->info.pvid) & ICE_PRIORITY_M) >>
		    ICE_VLAN_PRIORITY_S;

	ivi->trusted = vf->trusted;
	ivi->spoofchk = vf->spoofchk;
	/* report link state: auto when not forced, else the forced value */
	if (!vf->link_forced)
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->link_up)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
	ivi->max_tx_rate = vf->tx_rate;
	ivi->min_tx_rate = 0;
	return 0;
}
2881
2882
2883
2884
2885
2886
2887
2888
2889
2890int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
2891{
2892 struct ice_netdev_priv *np = netdev_priv(netdev);
2893 struct ice_vsi *vsi = np->vsi;
2894 struct ice_pf *pf = vsi->back;
2895 struct ice_vsi_ctx *ctx;
2896 enum ice_status status;
2897 struct ice_vf *vf;
2898 int ret = 0;
2899
2900
2901 if (vf_id >= pf->num_alloc_vfs) {
2902 netdev_err(netdev, "invalid VF id: %d\n", vf_id);
2903 return -EINVAL;
2904 }
2905
2906 vf = &pf->vf[vf_id];
2907 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
2908 netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
2909 return -EBUSY;
2910 }
2911
2912 if (ena == vf->spoofchk) {
2913 dev_dbg(&pf->pdev->dev, "VF spoofchk already %s\n",
2914 ena ? "ON" : "OFF");
2915 return 0;
2916 }
2917
2918 ctx = devm_kzalloc(&pf->pdev->dev, sizeof(*ctx), GFP_KERNEL);
2919 if (!ctx)
2920 return -ENOMEM;
2921
2922 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
2923
2924 if (ena) {
2925 ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
2926 ctx->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M;
2927 }
2928
2929 status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
2930 if (status) {
2931 dev_dbg(&pf->pdev->dev,
2932 "Error %d, failed to update VSI* parameters\n", status);
2933 ret = -EIO;
2934 goto out;
2935 }
2936
2937 vf->spoofchk = ena;
2938 vsi->info.sec_flags = ctx->info.sec_flags;
2939 vsi->info.sw_flags2 = ctx->info.sw_flags2;
2940out:
2941 devm_kfree(&pf->pdev->dev, ctx);
2942 return ret;
2943}
2944
2945
2946
2947
2948
2949
2950
2951
2952
2953int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
2954{
2955 struct ice_netdev_priv *np = netdev_priv(netdev);
2956 struct ice_vsi *vsi = np->vsi;
2957 struct ice_pf *pf = vsi->back;
2958 struct ice_vf *vf;
2959 int ret = 0;
2960
2961
2962 if (vf_id >= pf->num_alloc_vfs) {
2963 netdev_err(netdev, "invalid VF id: %d\n", vf_id);
2964 return -EINVAL;
2965 }
2966
2967 vf = &pf->vf[vf_id];
2968 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
2969 netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
2970 return -EBUSY;
2971 }
2972
2973 if (is_zero_ether_addr(mac) || is_multicast_ether_addr(mac)) {
2974 netdev_err(netdev, "%pM not a valid unicast address\n", mac);
2975 return -EINVAL;
2976 }
2977
2978
2979
2980
2981
2982
2983 ether_addr_copy(vf->dflt_lan_addr.addr, mac);
2984 vf->pf_set_mac = true;
2985 netdev_info(netdev,
2986 "MAC on VF %d set to %pM. VF driver will be reinitialized\n",
2987 vf_id, mac);
2988
2989 ice_vc_dis_vf(vf);
2990 return ret;
2991}
2992
2993
2994
2995
2996
2997
2998
2999
3000
3001int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
3002{
3003 struct ice_netdev_priv *np = netdev_priv(netdev);
3004 struct ice_vsi *vsi = np->vsi;
3005 struct ice_pf *pf = vsi->back;
3006 struct ice_vf *vf;
3007
3008
3009 if (vf_id >= pf->num_alloc_vfs) {
3010 dev_err(&pf->pdev->dev, "invalid VF id: %d\n", vf_id);
3011 return -EINVAL;
3012 }
3013
3014 vf = &pf->vf[vf_id];
3015 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
3016 dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
3017 return -EBUSY;
3018 }
3019
3020
3021 if (trusted == vf->trusted)
3022 return 0;
3023
3024 vf->trusted = trusted;
3025 ice_vc_dis_vf(vf);
3026 dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
3027 vf_id, trusted ? "" : "un");
3028
3029 return 0;
3030}
3031
3032
3033
3034
3035
3036
3037
3038
3039
3040int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
3041{
3042 struct ice_netdev_priv *np = netdev_priv(netdev);
3043 struct ice_pf *pf = np->vsi->back;
3044 struct virtchnl_pf_event pfe = { 0 };
3045 struct ice_link_status *ls;
3046 struct ice_vf *vf;
3047 struct ice_hw *hw;
3048
3049 if (vf_id >= pf->num_alloc_vfs) {
3050 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
3051 return -EINVAL;
3052 }
3053
3054 vf = &pf->vf[vf_id];
3055 hw = &pf->hw;
3056 ls = &pf->hw.port_info->phy.link_info;
3057
3058 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
3059 dev_err(&pf->pdev->dev, "vf %d in reset. Try again.\n", vf_id);
3060 return -EBUSY;
3061 }
3062
3063 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
3064 pfe.severity = PF_EVENT_SEVERITY_INFO;
3065
3066 switch (link_state) {
3067 case IFLA_VF_LINK_STATE_AUTO:
3068 vf->link_forced = false;
3069 vf->link_up = ls->link_info & ICE_AQ_LINK_UP;
3070 break;
3071 case IFLA_VF_LINK_STATE_ENABLE:
3072 vf->link_forced = true;
3073 vf->link_up = true;
3074 break;
3075 case IFLA_VF_LINK_STATE_DISABLE:
3076 vf->link_forced = true;
3077 vf->link_up = false;
3078 break;
3079 default:
3080 return -EINVAL;
3081 }
3082
3083 if (vf->link_forced)
3084 ice_set_pfe_link_forced(vf, &pfe, vf->link_up);
3085 else
3086 ice_set_pfe_link(vf, &pfe, ls->link_speed, vf->link_up);
3087
3088
3089 ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
3090 VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
3091 sizeof(pfe), NULL);
3092
3093 return 0;
3094}
3095