// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "i40e.h"
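
/*********************notification routines***********************/

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/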
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
				 enum virtchnl_ops v_opcode,
				 i40e_status v_retval, u8 *msg,
				 u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
		int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
		/* Not all vfs are enabled so skip the ones that are not */
		if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
		    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
	}
}
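
/**
 * i40e_vc_notify_vf_link_state
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 **/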
static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	/* Always report link is down if the VF queues aren't enabled */
	if (!vf->queues_enabled) {
		pfe.event_data.link_event.link_status = false;
		pfe.event_data.link_event.link_speed = 0;
	} else if (vf->link_forced) {
		pfe.event_data.link_event.link_status = vf->link_up;
		pfe.event_data.link_event.link_speed =
			(vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0);
	} else {
		pfe.event_data.link_event.link_status =
			ls->link_info & I40E_AQ_LINK_UP;
		pfe.event_data.link_event.link_speed =
			i40e_virtchnl_link_speed(ls->link_speed);
	}

	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);
}
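
/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the PF structure
 *
 * send a link status message to all VFs on a given PF
 **/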
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++)
		i40e_vc_notify_vf_link_state(&pf->vf[i]);
}
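
/**
 * i40e_vc_notify_reset
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/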
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
	struct virtchnl_pf_event pfe;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
			     (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}
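
/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the VF structure
 *
 * indicate a pending reset to the given VF
 **/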
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	int abs_vf_id;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return;

	/* verify if the VF is in either init or active before proceeding */
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
	    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		return;

	abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe,
			       sizeof(struct virtchnl_pf_event), NULL);
}
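
/**
 * i40e_vc_disable_vf
 * @vf: pointer to the VF info
 *
 * Disable the VF through a SW reset.
 **/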
static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
{
	int i;

	i40e_vc_notify_vf_reset(vf);

	/* We want to ensure that an actual reset occurs initiated after this
	 * function was called. However, we do not want to wait forever, so
	 * we'll give a reasonable time and print a message if we failed to
	 * ensure a reset.
	 */
	for (i = 0; i < 20; i++) {
		if (i40e_reset_vf(vf, false))
			return;
		usleep_range(10000, 20000);
	}

	dev_warn(&vf->pf->pdev->dev,
		 "Failed to initiate reset for VF %d after 200 milliseconds\n",
		 vf->vf_id);
}
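
/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI id
 *
 * check for the valid VSI id
 **/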
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (vsi->vf_id == vf->vf_id));
}
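
/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/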
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
					    u16 qid)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (qid < vsi->alloc_queue_pairs));
}
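
/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the VF info
 * @vector_id: VF relative vector id
 *
 * check for the valid vector id
 **/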
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}
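
/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue id
 *
 * return PF relative queue id
 **/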
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
				   u8 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

	if (!vsi)
		return pf_queue_id;

	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}
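
/**
 * i40e_get_real_pf_qid
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @queue_id: queue number
 *
 * wrapper function to get pf_queue_id handling ADq code as well
 **/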
static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
{
	int i;

	if (vf->adq_enabled) {
		/* Although VF considers all the queues (can be 1 to 16) as
		 * its own, they may actually belong to different VSIs (up to
		 * 4 separate channels). So we need to find which queue
		 * belongs to which VSI.
		 */
		for (i = 0; i < vf->num_tc; i++) {
			if (queue_id < vf->ch[i].num_qps) {
				vsi_id = vf->ch[i].vsi_id;
				break;
			}
			/* find right queue id which is relative to a
			 * given VSI.
			 */
			queue_id -= vf->ch[i].num_qps;
		}
	}

	return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
}
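
/**
 * i40e_config_irq_link_list
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as given by the FW
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/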
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
				      struct virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id, size;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
		     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
		     (vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
	tempmap = vecmap->rxq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id));
	}

	tempmap = vecmap->txq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id + 1));
	}

	size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
	next_q = find_first_bit(&linklistmap, size);
	if (unlikely(next_q == size))
		goto irq_list_done;

	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	while (next_q < size) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap, size, next_q + 1);
		if (next_q < size) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_get_real_pf_qid(vf,
							   vsi_id,
							   vsi_queue_id);
		} else {
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is same */
		reg = (vector_id) |
		    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		    BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

	/* if the vf is running in polling mode and using interrupt zero,
	 * need to disable auto-mask on enabling zero interrupt for VFs.
	 */
	if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
	    (vector_id == 0)) {
		reg = rd32(hw, I40E_GLINT_CTL);
		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
			wr32(hw, I40E_GLINT_CTL, reg);
		}
	}

irq_list_done:
	i40e_flush(hw);
}
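
/**
 * i40e_release_iwarp_qvlist
 * @vf: pointer to the VF structure
 **/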
static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
	u32 msix_vf;
	u32 i;

	if (!vf->qvlist_info)
		return;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		struct virtchnl_iwarp_qv_info *qv_info;
		u32 next_q_index, next_q_type;
		struct i40e_hw *hw = &pf->hw;
		u32 v_idx, reg_idx, reg;

		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;
		v_idx = qv_info->v_idx;
		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			/* Figure out the queue after CEQ and make that the
			 * first queue.
			 */
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
			next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
			next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (next_q_index &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (next_q_type <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}
	}
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
}
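
/**
 * i40e_config_iwarp_qvlist
 * @vf: pointer to the VF structure
 * @qvlist_info: queue and vector list
 *
 * Return 0 on success or negative error code on failure
 **/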
static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
				    struct virtchnl_iwarp_qvlist_info *qvlist_info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct virtchnl_iwarp_qv_info *qv_info;
	u32 v_idx, i, reg_idx, reg;
	u32 next_q_idx, next_q_type;
	u32 msix_vf;
	int ret = 0;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	if (qvlist_info->num_vectors > msix_vf) {
		dev_warn(&pf->pdev->dev,
			 "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n",
			 qvlist_info->num_vectors,
			 msix_vf);
		ret = -EINVAL;
		goto err_out;
	}

	kfree(vf->qvlist_info);
	vf->qvlist_info = kzalloc(struct_size(vf->qvlist_info, qv_info,
					      qvlist_info->num_vectors - 1),
				  GFP_KERNEL);
	if (!vf->qvlist_info) {
		ret = -ENOMEM;
		goto err_out;
	}
	vf->qvlist_info->num_vectors = qvlist_info->num_vectors;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;

		/* Validate vector id belongs to this vf */
		if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) {
			ret = -EINVAL;
			goto err_free;
		}

		v_idx = qv_info->v_idx;

		vf->qvlist_info->qv_info[i] = *qv_info;

		reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
		/* We might be sharing the interrupt, so get the first queue
		 * index and type, push it down the list by adding the new
		 * queue on top. Also link it with the new queue in CEQCTL.
		 */
		reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
		next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
				I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
		next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
				I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
			(v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
			(qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
			(next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
			(next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
			wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (qv_info->ceq_idx &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (I40E_QUEUE_TYPE_PE_CEQ <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}

		if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
			reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
			(v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
			(qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));

			wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
		}
	}

	return 0;
err_free:
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
err_out:
	return ret;
}
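
/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/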
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	struct i40e_vsi *vsi;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		ret = -ENOENT;
		goto error_context;
	}
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!vsi) {
		ret = -ENOENT;
		goto error_context;
	}

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
	tx_ctx.base = info->dma_ring_addr / 128;
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;
	tx_ctx.head_wb_ena = info->headwb_enabled;
	tx_ctx.head_wb_addr = info->dma_headwb_addr;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}
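
/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/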
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_rxq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	u16 pf_queue_id;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128;
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set split mode 10b */
		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.lrxqthresh = 1;
	rx_ctx.crcstrip = 1;
	rx_ctx.prefena = 1;
	rx_ctx.l2tsel = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}
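
/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the VF info
 * @idx: VSI index, applies only for ADq mode, zero otherwise
 *
 * alloc VF vsi context & resources
 **/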
static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	u64 max_tx_rate = 0;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
			     vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for VF %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}

	if (!idx) {
		u64 hena = i40e_pf_get_default_rss_hena(pf);
		u8 broadcast[ETH_ALEN];

		vf->lan_vsi_idx = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		/* If the port VLAN has been configured and then the
		 * VF driver was removed then the VSI port VLAN
		 * configuration was destroyed.  Check if there is
		 * a port VLAN and restore the VSI configuration if
		 * needed.
		 */
		if (vf->port_vlan_id)
			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
			f = i40e_add_mac_filter(vsi,
						vf->default_lan_addr.addr);
			if (!f)
				dev_info(&pf->pdev->dev,
					 "Could not add MAC filter %pM for VF %d\n",
					 vf->default_lan_addr.addr, vf->vf_id);
		}
		eth_broadcast_addr(broadcast);
		f = i40e_add_mac_filter(vsi, broadcast);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF broadcast filter\n");
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
		/* program mac filter only for VF VSI */
		ret = i40e_sync_vsi_filters(vsi);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
	}

	/* storing VSI index and id for ADq and don't apply the mac filter */
	if (vf->adq_enabled) {
		vf->ch[idx].vsi_idx = vsi->idx;
		vf->ch[idx].vsi_id = vsi->id;
	}

	/* Set VF bandwidth if specified */
	if (vf->tx_rate) {
		max_tx_rate = vf->tx_rate;
	} else if (vf->ch[idx].max_tx_rate) {
		max_tx_rate = vf->ch[idx].max_tx_rate;
	}

	if (max_tx_rate) {
		max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
						  max_tx_rate, 0, NULL);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
				vf->vf_id, ret);
	}

error_alloc_vsi_res:
	return ret;
}
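
/**
 * i40e_map_pf_queues_to_vsi
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function maps PF queues to VSI queue mapping registers.
 **/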
static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qps;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < 7; j++) {
			if (j * 2 >= qps) {
				/* end of list */
				reg = 0x07FF07FF;
			} else {
				u16 qid = i40e_vc_get_pf_queue_id(vf,
								  vsi_id,
								  j * 2);
				reg = qid;
				qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
							      (j * 2) + 1);
				reg |= qid << 16;
			}
			i40e_write_rx_ctl(hw,
					  I40E_VSILAN_QTABLE(j, vsi_id),
					  reg);
		}
	}
}
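
/**
 * i40e_map_pf_to_vf_queues
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function maps PF queues to VF queue mapping registers.
 **/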
static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_qps = 0;
	u32 qps, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qid;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < qps; j++) {
			qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);

			reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
			wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
			     reg);
			total_qps++;
		}
	}
}
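
/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the VF info
 *
 * enable VF mappings
 **/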
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	i40e_map_pf_to_vf_queues(vf);
	i40e_map_pf_queues_to_vsi(vf);

	i40e_flush(hw);
}
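
/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the VF info
 *
 * disable VF mappings
 **/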
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}
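
/**
 * i40e_free_vf_res
 * @vf: pointer to the VF info
 *
 * free VF resources
 **/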
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, j, msix_vf;

	/* Start by disabling VF's configuration API to prevent the OS from
	 * accessing the VF's VSI after it's freed / invalidated.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* It's possible the VF had requested more queues than the default so
	 * do the accounting here when we're about to free resources.
	 */
	if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
		pf->queues_left += vf->num_queue_pairs -
				   I40E_DEFAULT_QUEUES_PER_VF;
	}

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_id = 0;
	}

	/* do the accounting and remove additional ADq VSI's */
	if (vf->adq_enabled && vf->ch[0].vsi_idx) {
		for (j = 0; j < vf->num_tc; j++) {
			/* At this point VSI0 is already released so don't
			 * release it again and only clear their values in
			 * structure variables
			 */
			if (j)
				i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
			vf->ch[j].vsi_idx = 0;
			vf->ch[j].vsi_id = 0;
		}
	}
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						      (vf->vf_id))
						   + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						      (vf->vf_id))
						   + (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}

	/* reset some of the state variables keeping track of the resources */
	vf->num_queue_pairs = 0;
	clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
	clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
}
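
/**
 * i40e_alloc_vf_res
 * @vf: pointer to the VF info
 *
 * allocate VF resources
 **/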
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret, idx;

	if (vf->num_req_queues &&
	    vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
		pf->num_vf_qps = vf->num_req_queues;
	else
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, 0);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;

	/* allocate additional VSIs based on tc information for ADq */
	if (vf->adq_enabled) {
		if (pf->queues_left >=
		    (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
			/* TC 0 always belongs to VF VSI */
			for (idx = 1; idx < vf->num_tc; idx++) {
				ret = i40e_alloc_vsi_res(vf, idx);
				if (ret)
					goto error_alloc;
			}
			/* send correct number of queues */
			total_queue_pairs = I40E_MAX_VF_QUEUES;
		} else {
			dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
				 vf->vf_id);
			vf->adq_enabled = false;
		}
	}

	/* We account for each VF to get a default number of queue pairs.  If
	 * the VF has now requested more, we need to account for that to make
	 * certain we never request more queues than we actually have left in
	 * available pool.
	 */
	if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
		pf->queues_left -=
			total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;

	if (vf->trusted)
		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * VF req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* VF is now completely initialized */
	set_bit(I40E_VF_STATE_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}

#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_MASK 0x20
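
/**
 * i40e_quiesce_vf_pci
 * @vf: pointer to the VF structure
 *
 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 * if the transactions never clear.
 **/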
static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int vf_abs_id, i;
	u32 reg;

	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	wr32(hw, I40E_PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_PF_PCI_CIAD);
		if ((reg & VF_TRANS_PENDING_MASK) == 0)
			return 0;
		udelay(1);
	}
	return -EIO;
}

static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi);
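
/**
 * i40e_config_vf_promiscuous_mode
 * @vf: pointer to the VF info
 * @vsi_id: VSI id
 * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
 * @alluni: set MAC L2 layer unicast promiscuous enable/disable
 *
 * Called from the VF to configure the promiscuous mode of VF vsis
 **/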
static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
						   u16 vsi_id,
						   bool allmulti,
						   bool alluni)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_mac_filter *f;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int bkt;

	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
		return I40E_ERR_PARAM;

	if (vf->port_vlan_id) {
		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
							    allmulti,
							    vf->port_vlan_id,
							    NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
			return aq_ret;
		}

		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
							    alluni,
							    vf->port_vlan_id,
							    NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
		}
		return aq_ret;
	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
		hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
			if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
				continue;
			aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
								    vsi->seid,
								    allmulti,
								    f->vlan,
								    NULL);
			if (aq_ret) {
				int aq_err = pf->hw.aq.asq_last_status;

				dev_err(&pf->pdev->dev,
					"Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
					f->vlan,
					i40e_stat_str(&pf->hw, aq_ret),
					i40e_aq_str(&pf->hw, aq_err));
			}

			aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
								    vsi->seid,
								    alluni,
								    f->vlan,
								    NULL);
			if (aq_ret) {
				int aq_err = pf->hw.aq.asq_last_status;

				dev_err(&pf->pdev->dev,
					"Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
					f->vlan,
					i40e_stat_str(&pf->hw, aq_ret),
					i40e_aq_str(&pf->hw, aq_err));
			}
		}
		return aq_ret;
	}
	aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, allmulti,
						       NULL);
	if (aq_ret) {
		int aq_err = pf->hw.aq.asq_last_status;

		dev_err(&pf->pdev->dev,
			"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
			vf->vf_id,
			i40e_stat_str(&pf->hw, aq_ret),
			i40e_aq_str(&pf->hw, aq_err));
		return aq_ret;
	}

	aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, alluni,
						     NULL, true);
	if (aq_ret) {
		int aq_err = pf->hw.aq.asq_last_status;

		dev_err(&pf->pdev->dev,
			"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
			vf->vf_id,
			i40e_stat_str(&pf->hw, aq_ret),
			i40e_aq_str(&pf->hw, aq_err));
	}

	return aq_ret;
}
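
/**
 * i40e_trigger_vf_reset
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 **/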
static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, bit_idx;

	/* warn the VF */
	clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
	 * It's normally disabled in i40e_free_vf_res(), but it's safer
	 * to do it earlier to give some time to finish to any VF config
	 * functions that may still be running at this point.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!flr) {
		/* reset VF using VPGEN_VFRTRIG reg */
		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
		i40e_flush(hw);
	}
	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
	bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
	wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	i40e_flush(hw);

	if (i40e_quiesce_vf_pci(vf))
		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
			vf->vf_id);
}
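
/**
 * i40e_cleanup_reset_vf
 * @vf: pointer to the VF structure
 *
 * Cleanup a VF after the hardware reset is finished. Expects the caller to
 * have verified whether the reset is finished properly, and ensure the
 * minimum amount of wait time has passed.
 **/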
static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* disable promisc modes in case they were enabled */
	i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false);

	/* free VF resources to begin resetting the VSI state */
	i40e_free_vf_res(vf);

	/* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
	 * By doing this we allow HW to access VF memory at any point. If we
	 * did it any sooner, HW could access memory while it was being freed
	 * in i40e_free_vf_res(), causing an IOMMU fault.
	 *
	 * On the other hand, this needs to be done ASAP, because the VF driver
	 * is waiting for this to happen and may report a timeout. It's
	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
	 * it.
	 */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

	/* reallocate VF resources to finish resetting the VSI state */
	if (!i40e_alloc_vf_res(vf)) {
		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

		i40e_enable_vf_mappings(vf);
		set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
		clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);

		/* Do not notify the client during VF init */
		if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
					&vf->vf_states))
			i40e_notify_client_of_vf_reset(pf, abs_vf_id);
		vf->num_vlan = 0;
	}

	/* Tell the VF driver the reset is done. This needs to be done only
	 * after VF has been fully initialized, because the VF driver may
	 * request resources immediately after setting this flag.
	 */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}
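
/**
 * i40e_reset_vf
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Returns true if the VF is reset, false otherwise.
 **/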
bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool rsd = false;
	u32 reg;
	int i;

	/* If the VFs have been disabled, this means something else is
	 * resetting the VF, so we shouldn't continue.
	 */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return false;

	i40e_trigger_vf_reset(vf, flr);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully. Due to internal HW FIFO flushes,
		 * we must wait 10ms before the register will be valid.
		 */
		usleep_range(10000, 20000);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			vf->vf_id);
	usleep_range(10000, 20000);

	/* On initial reset, we don't have any queues to disable */
	if (vf->lan_vsi_idx != 0)
		i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);

	i40e_cleanup_reset_vf(vf);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}
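
/**
 * i40e_reset_all_vfs
 * @pf: pointer to the PF structure
 * @flr: VFLR was issued or not
 *
 * Reset all allocated VFs in one go. First, tell the hardware to reset each
 * VF, then do all the waiting in one chunk, and finally finish restoring each
 * VF after the wait. This is useful during PF routines which need to reset
 * all VFs, as otherwise it must perform these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 **/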
bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int i, v;
	u32 reg;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!pf->num_alloc_vfs)
		return false;

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return false;

	/* Begin reset on all VFs at once */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_trigger_vf_reset(&pf->vf[v], flr);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
		usleep_range(10000, 20000);

		/* Check each VF in sequence, beginning with the VF to fail
		 * the previous check.
		 */
		while (v < pf->num_alloc_vfs) {
			vf = &pf->vf[v];
			reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
			if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
				break;

			/* If the current VF has finished resetting, move on
			 * to the next VF in sequence.
			 */
			v++;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	/* Display a warning if at least one VF didn't manage to reset in
	 * time, but continue on with the operation.
	 */
	if (v < pf->num_alloc_vfs)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			pf->vf[v].vf_id);
	usleep_range(10000, 20000);

	/* Begin disabling all the rings associated with VFs, but do not wait
	 * between each VF.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* Now that we've notified HW to disable all of the VF rings, wait
	 * until they finish.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* Hw may need up to 50ms to finish disabling the RX queues. We
	 * minimize the wait by delaying only once for all VFs.
	 */
	mdelay(50);

	/* Finish the reset on each VF */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_cleanup_reset_vf(&pf->vf[v]);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}
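
/**
 * i40e_free_vfs
 * @pf: pointer to the PF structure
 *
 * free VF resources
 **/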
void i40e_free_vfs(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, bit_idx;
	int i, tmp, vf_id;

	if (!pf->vf)
		return;
	while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		usleep_range(1000, 2000);

	i40e_notify_client_of_vf_enable(pf, 0);

	/* Amortize wait time by stopping all VFs at the same time */
	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");

	/* free up VF resources */
	tmp = pf->num_alloc_vfs;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			i40e_free_vf_res(&pf->vf[i]);
		/* disable qp mappings */
		i40e_disable_vf_mappings(&pf->vf[i]);
	}

	kfree(pf->vf);
	pf->vf = NULL;

	/* This check is for when the driver is unloaded while VFs are
	 * assigned. Setting the number of VFs to 0 through sysfs is caught
	 * before this function ever gets called.
	 */
	if (!pci_vfs_assigned(pf->pdev)) {
		/* Acknowledge VFLR for all VFS. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}
	}
	clear_bit(__I40E_VF_DISABLE, pf->state);
}

#ifdef CONFIG_PCI_IOV
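/**
 * i40e_alloc_vfs
 * @pf: pointer to the PF structure
 * @num_alloc_vfs: number of VFs to allocate
 *
 * allocate VF resources
 **/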
int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
	struct i40e_vf *vfs;
	int i, ret = 0;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	i40e_irq_dynamic_disable_icr0(pf);

	/* Check to see if we're just allocating resources for extant VFs */
	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
		if (ret) {
			pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			pf->num_alloc_vfs = 0;
			goto err_iov;
		}
	}
	/* allocate memory */
	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	pf->vf = vfs;

	/* apply default profile */
	for (i = 0; i < num_alloc_vfs; i++) {
		vfs[i].pf = pf;
		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
		vfs[i].spoofchk = true;

		set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);

	}
	pf->num_alloc_vfs = num_alloc_vfs;

	/* VF resources get allocated during reset */
	i40e_reset_all_vfs(pf, false);

	i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);

err_alloc:
	if (ret)
		i40e_free_vfs(pf);
err_iov:
	/* Re-enable interrupt 0. */
	i40e_irq_dynamic_enable_icr0(pf);
	return ret;
}

#endif
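/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs
 **/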
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	if (test_bit(__I40E_TESTING, pf->state)) {
		dev_warn(&pdev->dev,
			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
		err = -EPERM;
		goto err_out;
	}

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	if (num_vfs > pf->num_req_vfs) {
		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
			 num_vfs, pf->num_req_vfs);
		err = -EPERM;
		goto err_out;
	}

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	return 0;
}
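
/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/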
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	if (num_vfs) {
		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
			i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
		}
		ret = i40e_pci_sriov_enable(pdev, num_vfs);
		goto sriov_configure_out;
	}

	if (!pci_vfs_assigned(pf->pdev)) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
		i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
	} else {
		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
		ret = -EINVAL;
		goto sriov_configure_out;
	}
sriov_configure_out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
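
/***********************virtual channel routines******************/

/**
 * i40e_vc_send_msg_to_vf
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to VF
 **/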
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
				  u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	int abs_vf_id;
	i40e_status aq_ret;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return -EINVAL;

	pf = vf->pf;
	hw = &pf->hw;
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* single place to detect unsuccessful return values */
	if (v_retval) {
		vf->num_invalid_msgs++;
		dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
			 vf->vf_id, v_opcode, v_retval);
		if (vf->num_invalid_msgs >
		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
			dev_err(&pf->pdev->dev,
				"Number of invalid messages exceeded for VF %d\n",
				vf->vf_id);
			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		}
	} else {
		vf->num_valid_msgs++;
		/* reset the invalid counter, if a valid message is received. */
		vf->num_invalid_msgs = 0;
	}

	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
					msg, msglen, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "Unable to send the message to VF %d aq_err %d\n",
			 vf->vf_id, pf->hw.aq.asq_last_status);
		return -EIO;
	}

	return 0;
}
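
/**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the VF info
 * @opcode: operation code
 * @retval: return value
 *
 * send resp msg to VF
 **/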
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
				   enum virtchnl_ops opcode,
				   i40e_status retval)
{
	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}
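
/**
 * i40e_vc_get_version_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request the API version used by the PF
 **/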
static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_version_info info = {
		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
	};

	vf->vf_ver = *(struct virtchnl_version_info *)msg;
	/* VFs running the 1.0 API expect to get 1.0 back */
	if (VF_IS_V10(&vf->vf_ver))
		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
				      I40E_SUCCESS, (u8 *)&info,
				      sizeof(struct virtchnl_version_info));
}
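
/**
 * i40e_del_qch - delete all the additional VSIs created as a part of ADq
 * @vf: pointer to VF structure
 **/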
static void i40e_del_qch(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int i;

	/* first element in the array belongs to primary VF VSI and we shouldn't
	 * delete it. We should however delete the rest of the VSIs created
	 */
	for (i = 1; i < vf->num_tc; i++) {
		if (vf->ch[i].vsi_idx) {
			i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
			vf->ch[i].vsi_idx = 0;
			vf->ch[i].vsi_id = 0;
		}
	}
}
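
/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request its resources
 **/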
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_vf_resource *vfres = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int num_vsis = 1;
	size_t len = 0;
	int ret;

	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	len = struct_size(vfres, vsi_res, num_vsis);
	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		aq_ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}
	if (VF_IS_V11(&vf->vf_ver))
		vf->driver_caps = *(u32 *)msg;
	else
		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
				  VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi->info.pvid)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;

	if (i40e_vf_client_capable(pf, vf->vf_id) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
		set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
	} else {
		clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
	} else {
		if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
		    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
		else
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
	}

	if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			vfres->vf_cap_flags |=
				VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;

	if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
		if (pf->flags & I40E_FLAG_MFP_ENABLED) {
			dev_err(&pf->pdev->dev,
				"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
				vf->vf_id);
			aq_ret = I40E_ERR_PARAM;
			goto err;
		}
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
	}

	if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
			vfres->vf_cap_flags |=
					VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ;

	vfres->num_vsis = num_vsis;
	vfres->num_queue_pairs = vf->num_queue_pairs;
	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
	vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;

	if (vf->lan_vsi_idx) {
		vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
		vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
		vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
		/* VFs only use TC 0 */
		vfres->vsi_res[0].qset_handle
					  = le16_to_cpu(vsi->info.qs_handle[0]);
		ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
				vf->default_lan_addr.addr);
	}
	set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

err:
	/* send the response back to the VF */
	ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
				     aq_ret, (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}
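
/**
 * i40e_vc_reset_vf_msg
 * @vf: pointer to the VF info
 *
 * called from the VF to reset itself,
 * unlike other virtchnl messages, PF driver
 * doesn't send the response back to the VF
 **/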
static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
{
	if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		i40e_reset_vf(vf, false);
}
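
/**
 * i40e_getnum_vf_vsi_vlan_filters
 * @vsi: pointer to the vsi
 *
 * called to get the number of VLANs offloaded on this VF
 **/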
static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	int num_vlans = 0, bkt;

	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
			num_vlans++;
	}

	return num_vlans;
}
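
/**
 * i40e_vc_config_promiscuous_mode_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure the promiscuous mode of
 * VF vsis
 **/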
static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_promisc_info *info =
	    (struct virtchnl_promisc_info *)msg;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	bool allmulti = false;
	bool alluni = false;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err_out;
	}
	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
		dev_err(&pf->pdev->dev,
			"Unprivileged VF %d is attempting to configure promiscuous mode\n",
			vf->vf_id);

		/* Lie to the VF on purpose, because this is an error we can
		 * ignore. Unprivileged VF is not a virtual channel error.
		 */
		aq_ret = 0;
		goto err_out;
	}

	if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) {
		aq_ret = I40E_ERR_PARAM;
		goto err_out;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto err_out;
	}

	/* Multicast promiscuous handling*/
	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
		allmulti = true;

	if (info->flags & FLAG_VF_UNICAST_PROMISC)
		alluni = true;
	aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti,
						 alluni);
	if (aq_ret)
		goto err_out;

	if (allmulti) {
		if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC,
				      &vf->vf_states))
			dev_info(&pf->pdev->dev,
				 "VF %d successfully set multicast promiscuous mode\n",
				 vf->vf_id);
	} else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC,
				      &vf->vf_states))
		dev_info(&pf->pdev->dev,
			 "VF %d successfully unset multicast promiscuous mode\n",
			 vf->vf_id);

	if (alluni) {
		if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC,
				      &vf->vf_states))
			dev_info(&pf->pdev->dev,
				 "VF %d successfully set unicast promiscuous mode\n",
				 vf->vf_id);
	} else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC,
				      &vf->vf_states))
		dev_info(&pf->pdev->dev,
			 "VF %d successfully unset unicast promiscuous mode\n",
			 vf->vf_id);

err_out:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf,
				       VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
				       aq_ret);
}
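
/**
 * i40e_vc_config_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure the rx/tx
 * queues
 **/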
static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_vsi_queue_config_info *qci =
	    (struct virtchnl_vsi_queue_config_info *)msg;
	struct virtchnl_queue_pair_info *qpi;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id, vsi_queue_id = 0;
	u16 num_qps_all = 0;
	i40e_status aq_ret = 0;
	int i, j = 0, idx = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (vf->adq_enabled) {
		for (i = 0; i < I40E_MAX_VF_VSI; i++)
			num_qps_all += vf->ch[i].num_qps;
		if (num_qps_all != qci->num_queue_pairs) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}

	vsi_id = qci->vsi_id;

	for (i = 0; i < qci->num_queue_pairs; i++) {
		qpi = &qci->qpair[i];

		if (!vf->adq_enabled) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      qpi->txq.queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}

			vsi_queue_id = qpi->txq.queue_id;

			if (qpi->txq.vsi_id != qci->vsi_id ||
			    qpi->rxq.vsi_id != qci->vsi_id ||
			    qpi->rxq.queue_id != vsi_queue_id) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
		}

		if (vf->adq_enabled) {
			if (idx >= ARRAY_SIZE(vf->ch)) {
				aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
				goto error_param;
			}
			vsi_id = vf->ch[idx].vsi_id;
		}

		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->rxq) ||
		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->txq)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		/* For ADq there can be up to 4 VSIs with max 4 queues each.
		 * VF does not know about these additional VSIs and all
		 * it cares is about its own queues. PF configures these queues
		 * to its appropriate VSIs based on TC mapping
		 */
		if (vf->adq_enabled) {
			if (idx >= ARRAY_SIZE(vf->ch)) {
				aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
				goto error_param;
			}
			if (j == (vf->ch[idx].num_qps - 1)) {
				idx++;
				j = 0; /* resetting the queue count */
				vsi_queue_id = 0;
			} else {
				j++;
				vsi_queue_id++;
			}
		}
	}

	/* set vsi num_queue_pairs in use to num configured by VF */
	if (!vf->adq_enabled) {
		pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
			qci->num_queue_pairs;
	} else {
		for (i = 0; i < vf->num_tc; i++)
			pf->vsi[vf->ch[i].vsi_idx]->num_queue_pairs =
			       vf->ch[i].num_qps;
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
				       aq_ret);
}
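
/**
 * i40e_validate_queue_map - check queue map is valid
 * @vf: the VF structure pointer
 * @vsi_id: vsi id
 * @queuemap: Tx or Rx queue map
 *
 * check if Tx or Rx queue map is valid
 **/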
static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
				   unsigned long queuemap)
{
	u16 vsi_queue_id, queue_id;

	for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
		if (vf->adq_enabled) {
			vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
			queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
		} else {
			queue_id = vsi_queue_id;
		}

		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id))
			return -EINVAL;
	}

	return 0;
}
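
/**
 * i40e_vc_config_irq_map_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure the irq to
 * queue map
 **/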
static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_irq_map_info *irqmap_info =
	    (struct virtchnl_irq_map_info *)msg;
	struct virtchnl_vector_map *map;
	u16 vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (irqmap_info->num_vectors >
	    vf->pf->hw.func_caps.num_msix_vectors_vf) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < irqmap_info->num_vectors; i++) {
		map = &irqmap_info->vecmap[i];
		/* validate msg params */
		if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) ||
		    !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
		vsi_id = map->vsi_id;

		if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		i40e_config_irq_link_list(vf, vsi_id, map);
	}
error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
				       aq_ret);
}
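
/**
 * i40e_ctrl_vf_tx_rings
 * @vsi: the SRIOV VSI being configured
 * @q_map: bit map of the queues to be enabled
 * @enable: start or stop the queue
 **/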
static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map,
				 bool enable)
{
	struct i40e_pf *pf = vsi->back;
	int ret = 0;
	u16 q_id;

	for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
		ret = i40e_control_wait_tx_q(vsi->seid, pf,
					     vsi->base_queue + q_id,
					     false /*is xdp*/, enable);
		if (ret)
			break;
	}
	return ret;
}
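
/**
 * i40e_ctrl_vf_rx_rings
 * @vsi: the SRIOV VSI being configured
 * @q_map: bit map of the queues to be enabled
 * @enable: start or stop the queue
 **/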
static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
				 bool enable)
{
	struct i40e_pf *pf = vsi->back;
	int ret = 0;
	u16 q_id;

	for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
		ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id,
					     enable);
		if (ret)
			break;
	}
	return ret;
}
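
/**
 * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
 * @vqs: virtchnl_queue_select structure containing bitmaps to validate
 *
 * Returns true if validation was successful, else false.
 **/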
static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
{
	if ((!vqs->rx_queues && !vqs->tx_queues) ||
	    vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) ||
	    vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES))
		return false;

	return true;
}
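
/**
 * i40e_vc_enable_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to enable all or specific queue(s)
 **/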
static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	/* error out if the bitmaps are NOT valid; the original check was
	 * inverted and rejected every valid request
	 */
	if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	/* Use the queue bit map sent by the VF */
	if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
				  true)) {
		aq_ret = I40E_ERR_TIMEOUT;
		goto error_param;
	}
	if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
				  true)) {
		aq_ret = I40E_ERR_TIMEOUT;
		goto error_param;
	}

	/* need to start the rings for additional ADq VSI's as well */
	if (vf->adq_enabled) {
		/* zero belongs to LAN VSI */
		for (i = 1; i < vf->num_tc; i++) {
			if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx]))
				aq_ret = I40E_ERR_TIMEOUT;
		}
	}

	vf->queues_enabled = true;

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
				       aq_ret);
}
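
/**
 * i40e_vc_disable_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to disable all or specific
 * queue(s)
 **/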
static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;

	/* Immediately mark queues as disabled */
	vf->queues_enabled = false;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	/* error out if the bitmaps are NOT valid; the original check was
	 * inverted and rejected every valid request
	 */
	if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	/* Use the queue bit map sent by the VF */
	if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
				  false)) {
		aq_ret = I40E_ERR_TIMEOUT;
		goto error_param;
	}
	if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
				  false)) {
		aq_ret = I40E_ERR_TIMEOUT;
		goto error_param;
	}
error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
				       aq_ret);
}
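
/**
 * i40e_vc_request_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * VFs get a default number of queues but can use this message to request a
 * different number. If the request is successful, PF will reset the VF and
 * return 0. If unsuccessful, PF will send message informing VF of number of
 * available queues and return result of sending VF a message.
 **/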
static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_vf_res_request *vfres =
		(struct virtchnl_vf_res_request *)msg;
	u16 req_pairs = vfres->num_queue_pairs;
	u8 cur_pairs = vf->num_queue_pairs;
	struct i40e_pf *pf = vf->pf;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		return -EINVAL;

	if (req_pairs > I40E_MAX_VF_QUEUES) {
		dev_err(&pf->pdev->dev,
			"VF %d tried to request more than %d queues.\n",
			vf->vf_id,
			I40E_MAX_VF_QUEUES);
		vfres->num_queue_pairs = I40E_MAX_VF_QUEUES;
	} else if (req_pairs - cur_pairs > pf->queues_left) {
		dev_warn(&pf->pdev->dev,
			 "VF %d requested %d more queues, but only %d left.\n",
			 vf->vf_id,
			 req_pairs - cur_pairs,
			 pf->queues_left);
		vfres->num_queue_pairs = pf->queues_left + cur_pairs;
	} else {
		/* successful request */
		vf->num_req_queues = req_pairs;
		i40e_vc_notify_vf_reset(vf);
		i40e_reset_vf(vf, false);
		return 0;
	}

	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
				      (u8 *)vfres, sizeof(*vfres));
}
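
/**
 * i40e_vc_get_stats_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to get vsi stats
 **/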
static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_eth_stats stats;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;

	memset(&stats, 0, sizeof(struct i40e_eth_stats));

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	i40e_update_eth_stats(vsi);
	stats = vsi->eth_stats;

error_param:
	/* send the response back to the VF */
	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
				      (u8 *)&stats, sizeof(stats));
}

/* If the VF is not trusted restrict the number of MAC/VLAN it can program
 * MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast
 */
#define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
#define I40E_VC_MAX_VLAN_PER_VF 16
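
/**
 * i40e_check_vf_permission
 * @vf: pointer to the VF info
 * @al: MAC address list from virtchnl
 *
 * Check that the given list of MAC addresses is allowed. Will return -EPERM
 * if any address in the list is not valid. Checks the following conditions:
 *
 * 1) broadcast and zero addresses are never valid
 * 2) unicast addresses are not allowed if the VMM has administratively set
 *    the VF MAC address, unless the VF is marked as privileged.
 * 3) There is enough space to add all the addresses.
 **/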
2560static inline int i40e_check_vf_permission(struct i40e_vf *vf,
2561 struct virtchnl_ether_addr_list *al)
2562{
2563 struct i40e_pf *pf = vf->pf;
2564 struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
2565 int mac2add_cnt = 0;
2566 int i;
2567
2568 for (i = 0; i < al->num_elements; i++) {
2569 struct i40e_mac_filter *f;
2570 u8 *addr = al->list[i].addr;
2571
2572 if (is_broadcast_ether_addr(addr) ||
2573 is_zero_ether_addr(addr)) {
2574 dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
2575 addr);
2576 return I40E_ERR_INVALID_MAC_ADDR;
2577 }
2578
		/* If the host VMM administrator has set the VF MAC address
		 * administratively via the ndo_set_vf_mac command then deny
		 * permission to the VF to add or delete unicast MAC
		 * addresses, unless the VF is privileged. The VF may request
		 * to set the MAC filter already assigned to it, so do not
		 * return an error in that case.
		 */
2586 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2587 !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
2588 !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
2589 dev_err(&pf->pdev->dev,
2590 "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
2591 return -EPERM;
2592 }
2593
		/* count filters that really will be added */
2595 f = i40e_find_mac(vsi, addr);
2596 if (!f)
2597 ++mac2add_cnt;
2598 }
2599
	/* If this VF is not privileged, then we can't add more than a limited
	 * number of addresses. Check to make sure that the additions do not
	 * push us over the limit.
	 */
2604 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2605 (i40e_count_filters(vsi) + mac2add_cnt) >
2606 I40E_VC_MAX_MAC_ADDR_PER_VF) {
2607 dev_err(&pf->pdev->dev,
2608 "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
2609 return -EPERM;
2610 }
2611 return 0;
2612}

/**
 * i40e_vc_add_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * add guest MAC address filter
 **/
2621static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
2622{
2623 struct virtchnl_ether_addr_list *al =
2624 (struct virtchnl_ether_addr_list *)msg;
2625 struct i40e_pf *pf = vf->pf;
2626 struct i40e_vsi *vsi = NULL;
2627 i40e_status ret = 0;
2628 int i;
2629
2630 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2631 !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
2632 ret = I40E_ERR_PARAM;
2633 goto error_param;
2634 }
2635
2636 vsi = pf->vsi[vf->lan_vsi_idx];

	/* Lock once, because every function inside the loop below accesses
	 * the VSI's MAC filter list, which must be protected by the same
	 * lock.
	 */
2641 spin_lock_bh(&vsi->mac_filter_hash_lock);
2642
2643 ret = i40e_check_vf_permission(vf, al);
2644 if (ret) {
2645 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2646 goto error_param;
2647 }
2648
	/* add new addresses to the list */
2650 for (i = 0; i < al->num_elements; i++) {
2651 struct i40e_mac_filter *f;
2652
2653 f = i40e_find_mac(vsi, al->list[i].addr);
2654 if (!f) {
2655 f = i40e_add_mac_filter(vsi, al->list[i].addr);
2656
2657 if (!f) {
2658 dev_err(&pf->pdev->dev,
2659 "Unable to add MAC filter %pM for VF %d\n",
2660 al->list[i].addr, vf->vf_id);
2661 ret = I40E_ERR_PARAM;
2662 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2663 goto error_param;
2664 }
2665 }
2666 }
2667 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2668
	/* program the updated filter list */
2670 ret = i40e_sync_vsi_filters(vsi);
2671 if (ret)
2672 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
2673 vf->vf_id, ret);
2674
2675error_param:
	/* send the response to the VF */
2677 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
2678 ret);
2679}

/**
 * i40e_vc_del_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * remove guest MAC address filter
 **/
2688static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
2689{
2690 struct virtchnl_ether_addr_list *al =
2691 (struct virtchnl_ether_addr_list *)msg;
2692 struct i40e_pf *pf = vf->pf;
2693 struct i40e_vsi *vsi = NULL;
2694 i40e_status ret = 0;
2695 int i;
2696
2697 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2698 !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
2699 ret = I40E_ERR_PARAM;
2700 goto error_param;
2701 }
2702
2703 for (i = 0; i < al->num_elements; i++) {
2704 if (is_broadcast_ether_addr(al->list[i].addr) ||
2705 is_zero_ether_addr(al->list[i].addr)) {
2706 dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
2707 al->list[i].addr, vf->vf_id);
2708 ret = I40E_ERR_INVALID_MAC_ADDR;
2709 goto error_param;
2710 }
2711 }
2712 vsi = pf->vsi[vf->lan_vsi_idx];
2713
2714 spin_lock_bh(&vsi->mac_filter_hash_lock);
	/* delete addresses from the list */
2716 for (i = 0; i < al->num_elements; i++)
2717 if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
2718 ret = I40E_ERR_INVALID_MAC_ADDR;
2719 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2720 goto error_param;
2721 }
2722
2723 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2724
	/* program the updated filter list */
2726 ret = i40e_sync_vsi_filters(vsi);
2727 if (ret)
2728 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
2729 vf->vf_id, ret);
2730
2731error_param:
	/* send the response to the VF */
2733 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
2734 ret);
2735}

/**
 * i40e_vc_add_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * program guest VLAN id
 **/
2744static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
2745{
2746 struct virtchnl_vlan_filter_list *vfl =
2747 (struct virtchnl_vlan_filter_list *)msg;
2748 struct i40e_pf *pf = vf->pf;
2749 struct i40e_vsi *vsi = NULL;
2750 i40e_status aq_ret = 0;
2751 int i;
2752
2753 if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
2754 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2755 dev_err(&pf->pdev->dev,
2756 "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
2758 }
2759 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2760 !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
2761 aq_ret = I40E_ERR_PARAM;
2762 goto error_param;
2763 }
2764
2765 for (i = 0; i < vfl->num_elements; i++) {
2766 if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
2767 aq_ret = I40E_ERR_PARAM;
2768 dev_err(&pf->pdev->dev,
2769 "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
2770 goto error_param;
2771 }
2772 }
2773 vsi = pf->vsi[vf->lan_vsi_idx];
2774 if (vsi->info.pvid) {
2775 aq_ret = I40E_ERR_PARAM;
2776 goto error_param;
2777 }
2778
2779 i40e_vlan_stripping_enable(vsi);
2780 for (i = 0; i < vfl->num_elements; i++) {
		/* add new VLAN filter */
2782 int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
2783 if (!ret)
2784 vf->num_vlan++;
2785
2786 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
2787 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
2788 true,
2789 vfl->vlan_id[i],
2790 NULL);
2791 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
2792 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
2793 true,
2794 vfl->vlan_id[i],
2795 NULL);
2796
2797 if (ret)
2798 dev_err(&pf->pdev->dev,
2799 "Unable to add VLAN filter %d for VF %d, error %d\n",
2800 vfl->vlan_id[i], vf->vf_id, ret);
2801 }
2802
2803error_param:
	/* send the response to the VF */
2805 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
2806}

/**
 * i40e_vc_remove_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * remove programmed guest VLAN id
 **/
2815static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
2816{
2817 struct virtchnl_vlan_filter_list *vfl =
2818 (struct virtchnl_vlan_filter_list *)msg;
2819 struct i40e_pf *pf = vf->pf;
2820 struct i40e_vsi *vsi = NULL;
2821 i40e_status aq_ret = 0;
2822 int i;
2823
2824 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2825 !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
2826 aq_ret = I40E_ERR_PARAM;
2827 goto error_param;
2828 }
2829
2830 for (i = 0; i < vfl->num_elements; i++) {
2831 if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
2832 aq_ret = I40E_ERR_PARAM;
2833 goto error_param;
2834 }
2835 }
2836
2837 vsi = pf->vsi[vf->lan_vsi_idx];
2838 if (vsi->info.pvid) {
2839 if (vfl->num_elements > 1 || vfl->vlan_id[0])
2840 aq_ret = I40E_ERR_PARAM;
2841 goto error_param;
2842 }
2843
2844 for (i = 0; i < vfl->num_elements; i++) {
2845 i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
2846 vf->num_vlan--;
2847
2848 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
2849 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
2850 false,
2851 vfl->vlan_id[i],
2852 NULL);
2853 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
2854 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
2855 false,
2856 vfl->vlan_id[i],
2857 NULL);
2858 }
2859
2860error_param:
	/* send the response to the VF */
2862 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
2863}

/**
 * i40e_vc_iwarp_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF for the iWARP messages
 **/
2873static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2874{
2875 struct i40e_pf *pf = vf->pf;
2876 int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
2877 i40e_status aq_ret = 0;
2878
2879 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2880 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
2881 aq_ret = I40E_ERR_PARAM;
2882 goto error_param;
2883 }
2884
2885 i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
2886 msg, msglen);
2887
2888error_param:
	/* send the response to the VF */
2890 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP,
2891 aq_ret);
2892}
2893

/**
 * i40e_vc_iwarp_qvmap_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @config: config the qvmap or release it
 *
 * called from the VF for the iWARP messages
 **/
2902static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
2903{
2904 struct virtchnl_iwarp_qvlist_info *qvlist_info =
2905 (struct virtchnl_iwarp_qvlist_info *)msg;
2906 i40e_status aq_ret = 0;
2907
2908 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2909 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
2910 aq_ret = I40E_ERR_PARAM;
2911 goto error_param;
2912 }
2913
2914 if (config) {
2915 if (i40e_config_iwarp_qvlist(vf, qvlist_info))
2916 aq_ret = I40E_ERR_PARAM;
2917 } else {
2918 i40e_release_iwarp_qvlist(vf);
2919 }
2920
2921error_param:
	/* send the response to the VF */
2923 return i40e_vc_send_resp_to_vf(vf,
2924 config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
2925 VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
2926 aq_ret);
2927}
2928
/**
 * i40e_vc_config_rss_key
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Configure the VF's RSS key
 **/
2936static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
2937{
2938 struct virtchnl_rss_key *vrk =
2939 (struct virtchnl_rss_key *)msg;
2940 struct i40e_pf *pf = vf->pf;
2941 struct i40e_vsi *vsi = NULL;
2942 i40e_status aq_ret = 0;
2943
2944 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2945 !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
2946 (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
2947 aq_ret = I40E_ERR_PARAM;
2948 goto err;
2949 }
2950
2951 vsi = pf->vsi[vf->lan_vsi_idx];
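	/* Note on i40e_config_rss(vsi, seed, lut, lut_size): a non-NULL seed
	 * programs the hash key (I40E_HKEY_ARRAY_SIZE bytes), while a NULL
	 * lut (as here) leaves the lookup table untouched, so this call
	 * updates the key only.
	 */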
2952 aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
2953err:
	/* send the response to the VF */
2955 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
2956 aq_ret);
2957}
2958
/**
 * i40e_vc_config_rss_lut
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Configure the VF's RSS LUT
 **/
2966static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
2967{
2968 struct virtchnl_rss_lut *vrl =
2969 (struct virtchnl_rss_lut *)msg;
2970 struct i40e_pf *pf = vf->pf;
2971 struct i40e_vsi *vsi = NULL;
2972 i40e_status aq_ret = 0;
2973 u16 i;
2974
2975 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2976 !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
2977 (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
2978 aq_ret = I40E_ERR_PARAM;
2979 goto err;
2980 }
2981
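	/* Each LUT entry maps one hash bucket to a VF queue index, so every
	 * entry must stay below num_queue_pairs. E.g. with 4 queue pairs a
	 * valid LUT cycles 0,1,2,3,0,1,... across its entries.
	 */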
2982 for (i = 0; i < vrl->lut_entries; i++)
2983 if (vrl->lut[i] >= vf->num_queue_pairs) {
2984 aq_ret = I40E_ERR_PARAM;
2985 goto err;
2986 }
2987
2988 vsi = pf->vsi[vf->lan_vsi_idx];
2989 aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
	/* send the response to the VF */
2991err:
2992 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
2993 aq_ret);
2994}
2995
/**
 * i40e_vc_get_rss_hena
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return the RSS HENA bits allowed by the hardware
 **/
3003static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
3004{
3005 struct virtchnl_rss_hena *vrh = NULL;
3006 struct i40e_pf *pf = vf->pf;
3007 i40e_status aq_ret = 0;
3008 int len = 0;
3009
3010 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3011 aq_ret = I40E_ERR_PARAM;
3012 goto err;
3013 }
3014 len = sizeof(struct virtchnl_rss_hena);
3015
3016 vrh = kzalloc(len, GFP_KERNEL);
3017 if (!vrh) {
3018 aq_ret = I40E_ERR_NO_MEMORY;
3019 len = 0;
3020 goto err;
3021 }
3022 vrh->hena = i40e_pf_get_default_rss_hena(pf);
3023err:
	/* send the response back to the VF */
3025 aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
3026 aq_ret, (u8 *)vrh, len);
3027 kfree(vrh);
3028 return aq_ret;
3029}
3030
/**
 * i40e_vc_set_rss_hena
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Set the RSS HENA bits for the VF
 **/
3038static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
3039{
3040 struct virtchnl_rss_hena *vrh =
3041 (struct virtchnl_rss_hena *)msg;
3042 struct i40e_pf *pf = vf->pf;
3043 struct i40e_hw *hw = &pf->hw;
3044 i40e_status aq_ret = 0;
3045
3046 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3047 aq_ret = I40E_ERR_PARAM;
3048 goto err;
3049 }
3050 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
3051 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
3052 (u32)(vrh->hena >> 32));
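	/* The 64-bit HENA (hash enable) bitmap is split across two 32-bit
	 * registers: HENA1(0, vf_id) takes bits 31:0 and HENA1(1, vf_id)
	 * takes bits 63:32, hence the cast and the >> 32 above.
	 */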
3053
	/* send the response to the VF */
3055err:
3056 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
3057}
3058
/**
 * i40e_vc_enable_vlan_stripping
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Enable VLAN header stripping for the VF
 **/
3066static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3067{
3068 i40e_status aq_ret = 0;
3069 struct i40e_vsi *vsi;
3070
3071 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3072 aq_ret = I40E_ERR_PARAM;
3073 goto err;
3074 }
3075
3076 vsi = vf->pf->vsi[vf->lan_vsi_idx];
3077 i40e_vlan_stripping_enable(vsi);
3078
	/* send the response to the VF */
3080err:
3081 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
3082 aq_ret);
3083}
3084
/**
 * i40e_vc_disable_vlan_stripping
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Disable VLAN header stripping for the VF
 **/
3092static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3093{
3094 i40e_status aq_ret = 0;
3095 struct i40e_vsi *vsi;
3096
3097 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3098 aq_ret = I40E_ERR_PARAM;
3099 goto err;
3100 }
3101
3102 vsi = vf->pf->vsi[vf->lan_vsi_idx];
3103 i40e_vlan_stripping_disable(vsi);
3104
	/* send the response to the VF */
3106err:
3107 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
3108 aq_ret);
3109}
3110
/**
 * i40e_validate_cloud_filter
 * @vf: pointer to the VF structure
 * @tc_filter: pointer to the requested filter
 *
 * This function validates a cloud filter programmed as a TC filter for ADq
 **/
3118static int i40e_validate_cloud_filter(struct i40e_vf *vf,
3119 struct virtchnl_filter *tc_filter)
3120{
3121 struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec;
3122 struct virtchnl_l4_spec data = tc_filter->data.tcp_spec;
3123 struct i40e_pf *pf = vf->pf;
3124 struct i40e_vsi *vsi = NULL;
3125 struct i40e_mac_filter *f;
3126 struct hlist_node *h;
3127 bool found = false;
3128 int bkt;
3129
3130 if (!tc_filter->action) {
3131 dev_info(&pf->pdev->dev,
3132 "VF %d: Currently ADq doesn't support Drop Action\n",
3133 vf->vf_id);
3134 goto err;
3135 }
3136
	/* action_meta is the TC number to which the filter is applied */
3138 if (!tc_filter->action_meta ||
3139 tc_filter->action_meta > I40E_MAX_VF_VSI) {
3140 dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
3141 vf->vf_id, tc_filter->action_meta);
3142 goto err;
3143 }

	/* Check whether the filter is programmed for advanced mode or basic
	 * mode. There are two ADq modes (for VF only):
	 * 1. Basic mode: intended to allow as many filter options as possible
	 *    to be added to a VF in Non-trusted mode. The main goal is to add
	 *    filters for the VF's own MAC and VLAN id.
	 * 2. Advanced mode: allows filters to be applied to other than the
	 *    VF's own MAC or VLAN. This mode requires the VF to be Trusted.
	 */
3154 if (mask.dst_mac[0] && !mask.dst_ip[0]) {
3155 vsi = pf->vsi[vf->lan_vsi_idx];
3156 f = i40e_find_mac(vsi, data.dst_mac);
3157
3158 if (!f) {
3159 dev_info(&pf->pdev->dev,
3160 "Destination MAC %pM doesn't belong to VF %d\n",
3161 data.dst_mac, vf->vf_id);
3162 goto err;
3163 }
3164
3165 if (mask.vlan_id) {
3166 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f,
3167 hlist) {
3168 if (f->vlan == ntohs(data.vlan_id)) {
3169 found = true;
3170 break;
3171 }
3172 }
3173 if (!found) {
3174 dev_info(&pf->pdev->dev,
3175 "VF %d doesn't have any VLAN id %u\n",
3176 vf->vf_id, ntohs(data.vlan_id));
3177 goto err;
3178 }
3179 }
3180 } else {
		/* Check if VF is trusted */
3182 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
3183 dev_err(&pf->pdev->dev,
3184 "VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n",
3185 vf->vf_id);
3186 return I40E_ERR_CONFIG;
3187 }
3188 }
3189
3190 if (mask.dst_mac[0] & data.dst_mac[0]) {
3191 if (is_broadcast_ether_addr(data.dst_mac) ||
3192 is_zero_ether_addr(data.dst_mac)) {
3193 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n",
3194 vf->vf_id, data.dst_mac);
3195 goto err;
3196 }
3197 }
3198
3199 if (mask.src_mac[0] & data.src_mac[0]) {
3200 if (is_broadcast_ether_addr(data.src_mac) ||
3201 is_zero_ether_addr(data.src_mac)) {
3202 dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n",
3203 vf->vf_id, data.src_mac);
3204 goto err;
3205 }
3206 }
3207
3208 if (mask.dst_port & data.dst_port) {
3209 if (!data.dst_port) {
3210 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
3211 vf->vf_id);
3212 goto err;
3213 }
3214 }
3215
3216 if (mask.src_port & data.src_port) {
3217 if (!data.src_port) {
3218 dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
3219 vf->vf_id);
3220 goto err;
3221 }
3222 }
3223
3224 if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW &&
3225 tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) {
3226 dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n",
3227 vf->vf_id);
3228 goto err;
3229 }
3230
3231 if (mask.vlan_id & data.vlan_id) {
3232 if (ntohs(data.vlan_id) > I40E_MAX_VLANID) {
3233 dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n",
3234 vf->vf_id);
3235 goto err;
3236 }
3237 }
3238
3239 return I40E_SUCCESS;
3240err:
3241 return I40E_ERR_CONFIG;
3242}

/**
 * i40e_find_vsi_from_seid - searches for the VSI with the given seid
 * @vf: pointer to the VF structure
 * @seid: seid of the VSI it is searching for
 **/
3249static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid)
3250{
3251 struct i40e_pf *pf = vf->pf;
3252 struct i40e_vsi *vsi = NULL;
3253 int i;
3254
	for (i = 0; i < vf->num_tc; i++) {
3256 vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id);
3257 if (vsi && vsi->seid == seid)
3258 return vsi;
3259 }
3260 return NULL;
3261}

/**
 * i40e_del_all_cloud_filters
 * @vf: pointer to the VF info
 *
 * This function deletes all cloud filters
 **/
3269static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
3270{
3271 struct i40e_cloud_filter *cfilter = NULL;
3272 struct i40e_pf *pf = vf->pf;
3273 struct i40e_vsi *vsi = NULL;
3274 struct hlist_node *node;
3275 int ret;
3276
3277 hlist_for_each_entry_safe(cfilter, node,
3278 &vf->cloud_filter_list, cloud_node) {
3279 vsi = i40e_find_vsi_from_seid(vf, cfilter->seid);
3280
3281 if (!vsi) {
3282 dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n",
3283 vf->vf_id, cfilter->seid);
3284 continue;
3285 }
3286
3287 if (cfilter->dst_port)
3288 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
3289 false);
3290 else
3291 ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
3292 if (ret)
3293 dev_err(&pf->pdev->dev,
3294 "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
3295 vf->vf_id, i40e_stat_str(&pf->hw, ret),
3296 i40e_aq_str(&pf->hw,
3297 pf->hw.aq.asq_last_status));
3298
3299 hlist_del(&cfilter->cloud_node);
3300 kfree(cfilter);
3301 vf->num_cloud_filters--;
3302 }
3303}

/**
 * i40e_vc_del_cloud_filter
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * This function deletes a cloud filter programmed as a TC filter for ADq
 **/
3312static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
3313{
3314 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3315 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3316 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3317 struct i40e_cloud_filter cfilter, *cf = NULL;
3318 struct i40e_pf *pf = vf->pf;
3319 struct i40e_vsi *vsi = NULL;
3320 struct hlist_node *node;
3321 i40e_status aq_ret = 0;
3322 int i, ret;
3323
3324 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3325 aq_ret = I40E_ERR_PARAM;
3326 goto err;
3327 }
3328
3329 if (!vf->adq_enabled) {
3330 dev_info(&pf->pdev->dev,
3331 "VF %d: ADq not enabled, can't apply cloud filter\n",
3332 vf->vf_id);
3333 aq_ret = I40E_ERR_PARAM;
3334 goto err;
3335 }
3336
3337 if (i40e_validate_cloud_filter(vf, vcf)) {
3338 dev_info(&pf->pdev->dev,
3339 "VF %d: Invalid input, can't apply cloud filter\n",
3340 vf->vf_id);
3341 aq_ret = I40E_ERR_PARAM;
3342 goto err;
3343 }
3344
3345 memset(&cfilter, 0, sizeof(cfilter));

	/* parse destination mac address */
3347 for (i = 0; i < ETH_ALEN; i++)
3348 cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3349
	/* parse source mac address */
3351 for (i = 0; i < ETH_ALEN; i++)
3352 cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3353
3354 cfilter.vlan_id = mask.vlan_id & tcf.vlan_id;
3355 cfilter.dst_port = mask.dst_port & tcf.dst_port;
3356 cfilter.src_port = mask.src_port & tcf.src_port;
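
	/* Every field above is ANDed with its mask, so only the parts the VF
	 * asked to match survive: e.g. mask.dst_port == 0xffff keeps the
	 * port, while a zero mask collapses the field to 0 and it is treated
	 * as "don't care" by the delete logic below.
	 */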
3357
3358 switch (vcf->flow_type) {
3359 case VIRTCHNL_TCP_V4_FLOW:
3360 cfilter.n_proto = ETH_P_IP;
3361 if (mask.dst_ip[0] & tcf.dst_ip[0])
3362 memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
3363 ARRAY_SIZE(tcf.dst_ip));
3364 else if (mask.src_ip[0] & tcf.dst_ip[0])
3365 memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
3366 ARRAY_SIZE(tcf.dst_ip));
3367 break;
3368 case VIRTCHNL_TCP_V6_FLOW:
3369 cfilter.n_proto = ETH_P_IPV6;
3370 if (mask.dst_ip[3] & tcf.dst_ip[3])
3371 memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip,
3372 sizeof(cfilter.ip.v6.dst_ip6));
3373 if (mask.src_ip[3] & tcf.src_ip[3])
3374 memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip,
3375 sizeof(cfilter.ip.v6.src_ip6));
3376 break;
3377 default:
		/* TC filter can be configured based on different combinations
		 * and in this case IP is not a mandatory parameter.
		 */
3381 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3382 vf->vf_id);
3383 }
3384
	/* get the VSI to which the TC belongs to */
3386 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3387 cfilter.seid = vsi->seid;
3388 cfilter.flags = vcf->field_flags;
3389
	/* Deleting TC filter */
3391 if (tcf.dst_port)
3392 ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false);
3393 else
3394 ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
3395 if (ret) {
3396 dev_err(&pf->pdev->dev,
3397 "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
3398 vf->vf_id, i40e_stat_str(&pf->hw, ret),
3399 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3400 goto err;
3401 }
3402
3403 hlist_for_each_entry_safe(cf, node,
3404 &vf->cloud_filter_list, cloud_node) {
3405 if (cf->seid != cfilter.seid)
3406 continue;
3407 if (mask.dst_port)
3408 if (cfilter.dst_port != cf->dst_port)
3409 continue;
3410 if (mask.dst_mac[0])
3411 if (!ether_addr_equal(cf->src_mac, cfilter.src_mac))
3412 continue;
3413
3414 if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0])
3415 if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip,
3416 ARRAY_SIZE(tcf.dst_ip)))
3417 continue;
3418
3419 if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
3420 if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
3421 sizeof(cfilter.ip.v6.src_ip6)))
3422 continue;
3423 if (mask.vlan_id)
3424 if (cfilter.vlan_id != cf->vlan_id)
3425 continue;
3426
3427 hlist_del(&cf->cloud_node);
3428 kfree(cf);
3429 vf->num_cloud_filters--;
3430 }
3431
3432err:
3433 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER,
3434 aq_ret);
3435}

/**
 * i40e_vc_add_cloud_filter
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * This function adds a cloud filter programmed as a TC filter for ADq
 **/
3444static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
3445{
3446 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3447 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3448 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3449 struct i40e_cloud_filter *cfilter = NULL;
3450 struct i40e_pf *pf = vf->pf;
3451 struct i40e_vsi *vsi = NULL;
3452 i40e_status aq_ret = 0;
3453 int i, ret;
3454
3455 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3456 aq_ret = I40E_ERR_PARAM;
3457 goto err_out;
3458 }
3459
3460 if (!vf->adq_enabled) {
3461 dev_info(&pf->pdev->dev,
3462 "VF %d: ADq is not enabled, can't apply cloud filter\n",
3463 vf->vf_id);
3464 aq_ret = I40E_ERR_PARAM;
3465 goto err_out;
3466 }
3467
3468 if (i40e_validate_cloud_filter(vf, vcf)) {
3469 dev_info(&pf->pdev->dev,
3470 "VF %d: Invalid input/s, can't apply cloud filter\n",
3471 vf->vf_id);
3472 aq_ret = I40E_ERR_PARAM;
3473 goto err_out;
3474 }
3475
3476 cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
3477 if (!cfilter)
3478 return -ENOMEM;
3479
	/* parse destination mac address */
3481 for (i = 0; i < ETH_ALEN; i++)
3482 cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3483
	/* parse source mac address */
3485 for (i = 0; i < ETH_ALEN; i++)
3486 cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3487
3488 cfilter->vlan_id = mask.vlan_id & tcf.vlan_id;
3489 cfilter->dst_port = mask.dst_port & tcf.dst_port;
3490 cfilter->src_port = mask.src_port & tcf.src_port;
3491
3492 switch (vcf->flow_type) {
3493 case VIRTCHNL_TCP_V4_FLOW:
3494 cfilter->n_proto = ETH_P_IP;
3495 if (mask.dst_ip[0] & tcf.dst_ip[0])
3496 memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
3497 ARRAY_SIZE(tcf.dst_ip));
3498 else if (mask.src_ip[0] & tcf.dst_ip[0])
3499 memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
3500 ARRAY_SIZE(tcf.dst_ip));
3501 break;
3502 case VIRTCHNL_TCP_V6_FLOW:
3503 cfilter->n_proto = ETH_P_IPV6;
3504 if (mask.dst_ip[3] & tcf.dst_ip[3])
3505 memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip,
3506 sizeof(cfilter->ip.v6.dst_ip6));
3507 if (mask.src_ip[3] & tcf.src_ip[3])
3508 memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip,
3509 sizeof(cfilter->ip.v6.src_ip6));
3510 break;
3511 default:
		/* TC filter can be configured based on different combinations
		 * and in this case IP is not a mandatory parameter.
		 */
3515 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3516 vf->vf_id);
3517 }
3518
	/* get the VSI to which the TC belongs to */
3520 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3521 cfilter->seid = vsi->seid;
3522 cfilter->flags = vcf->field_flags;
3523
	/* Adding cloud filter programmed as TC filter */
3525 if (tcf.dst_port)
3526 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
3527 else
3528 ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
3529 if (ret) {
3530 dev_err(&pf->pdev->dev,
3531 "VF %d: Failed to add cloud filter, err %s aq_err %s\n",
3532 vf->vf_id, i40e_stat_str(&pf->hw, ret),
3533 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3534 goto err_free;
3535 }
3536
3537 INIT_HLIST_NODE(&cfilter->cloud_node);
3538 hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
3539
3540 cfilter = NULL;
3541 vf->num_cloud_filters++;
3542err_free:
3543 kfree(cfilter);
3544err_out:
3545 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
3546 aq_ret);
3547}
3548
/**
 * i40e_vc_add_qch_msg - Add queue channel and enable ADq
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 **/
3554static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
3555{
3556 struct virtchnl_tc_info *tci =
3557 (struct virtchnl_tc_info *)msg;
3558 struct i40e_pf *pf = vf->pf;
3559 struct i40e_link_status *ls = &pf->hw.phy.link_info;
3560 int i, adq_request_qps = 0;
3561 i40e_status aq_ret = 0;
3562 u64 speed = 0;
3563
3564 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3565 aq_ret = I40E_ERR_PARAM;
3566 goto err;
3567 }
3568
	/* ADq cannot be applied if spoof check is ON */
3570 if (vf->spoofchk) {
3571 dev_err(&pf->pdev->dev,
3572 "Spoof check is ON, turn it OFF to enable ADq\n");
3573 aq_ret = I40E_ERR_PARAM;
3574 goto err;
3575 }
3576
3577 if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) {
3578 dev_err(&pf->pdev->dev,
3579 "VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
3580 vf->vf_id);
3581 aq_ret = I40E_ERR_PARAM;
3582 goto err;
3583 }
3584
	/* max number of traffic classes for VF currently capped at 4 */
3586 if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) {
3587 dev_err(&pf->pdev->dev,
3588 "VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n",
3589 vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI);
3590 aq_ret = I40E_ERR_PARAM;
3591 goto err;
3592 }
3593
	/* validate queues for each TC */
3595 for (i = 0; i < tci->num_tc; i++)
3596 if (!tci->list[i].count ||
3597 tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
3598 dev_err(&pf->pdev->dev,
3599 "VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n",
3600 vf->vf_id, i, tci->list[i].count,
3601 I40E_DEFAULT_QUEUES_PER_VF);
3602 aq_ret = I40E_ERR_PARAM;
3603 goto err;
3604 }
3605
	/* need max VF queues but already have default number of queues */
3607 adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF;
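	/* With the usual defines (I40E_MAX_VF_QUEUES = 16,
	 * I40E_DEFAULT_QUEUES_PER_VF = 4) this asks the PF pool for
	 * 16 - 4 = 12 extra queue pairs on top of the default allocation.
	 */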
3608
3609 if (pf->queues_left < adq_request_qps) {
3610 dev_err(&pf->pdev->dev,
3611 "No queues left to allocate to VF %d\n",
3612 vf->vf_id);
3613 aq_ret = I40E_ERR_PARAM;
3614 goto err;
3615 } else {
		/* we need to allocate max VF queues to enable ADq so as to
		 * make sure ADq enabled VF always gets back queues when it
		 * goes through a reset.
		 */
3620 vf->num_queue_pairs = I40E_MAX_VF_QUEUES;
3621 }
3622
	/* get link speed in MB to validate rate limit */
3624 switch (ls->link_speed) {
3625 case VIRTCHNL_LINK_SPEED_100MB:
3626 speed = SPEED_100;
3627 break;
3628 case VIRTCHNL_LINK_SPEED_1GB:
3629 speed = SPEED_1000;
3630 break;
3631 case VIRTCHNL_LINK_SPEED_10GB:
3632 speed = SPEED_10000;
3633 break;
3634 case VIRTCHNL_LINK_SPEED_20GB:
3635 speed = SPEED_20000;
3636 break;
3637 case VIRTCHNL_LINK_SPEED_25GB:
3638 speed = SPEED_25000;
3639 break;
3640 case VIRTCHNL_LINK_SPEED_40GB:
3641 speed = SPEED_40000;
3642 break;
3643 default:
3644 dev_err(&pf->pdev->dev,
3645 "Cannot detect link speed\n");
3646 aq_ret = I40E_ERR_PARAM;
3647 goto err;
3648 }
3649
	/* parse data from the queue channel info */
3651 vf->num_tc = tci->num_tc;
3652 for (i = 0; i < vf->num_tc; i++) {
3653 if (tci->list[i].max_tx_rate) {
3654 if (tci->list[i].max_tx_rate > speed) {
3655 dev_err(&pf->pdev->dev,
3656 "Invalid max tx rate %llu specified for VF %d.",
3657 tci->list[i].max_tx_rate,
3658 vf->vf_id);
3659 aq_ret = I40E_ERR_PARAM;
3660 goto err;
3661 } else {
3662 vf->ch[i].max_tx_rate =
3663 tci->list[i].max_tx_rate;
3664 }
3665 }
3666 vf->ch[i].num_qps = tci->list[i].count;
3667 }
3668
	/* set this flag only after making sure all inputs are sane */
3670 vf->adq_enabled = true;
	/* num_req_queues is set when user changes number of queues via ethtool
	 * and this causes issue for default VSI (which depends on this
	 * variable) when ADq is enabled, hence reset it.
	 */
3675 vf->num_req_queues = 0;
3676
	/* reset the VF in order to allocate resources */
3678 i40e_vc_notify_vf_reset(vf);
3679 i40e_reset_vf(vf, false);
3680
3681 return I40E_SUCCESS;
3682
	/* send the response to the VF */
3684err:
3685 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS,
3686 aq_ret);
3687}
3688
/**
 * i40e_vc_del_qch_msg - Delete queue channels and disable ADq
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 **/
3694static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
3695{
3696 struct i40e_pf *pf = vf->pf;
3697 i40e_status aq_ret = 0;
3698
3699 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3700 aq_ret = I40E_ERR_PARAM;
3701 goto err;
3702 }
3703
3704 if (vf->adq_enabled) {
3705 i40e_del_all_cloud_filters(vf);
3706 i40e_del_qch(vf);
3707 vf->adq_enabled = false;
3708 vf->num_tc = 0;
3709 dev_info(&pf->pdev->dev,
3710 "Deleting Queue Channels and cloud filters for ADq on VF %d\n",
3711 vf->vf_id);
3712 } else {
3713 dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
3714 vf->vf_id);
3715 aq_ret = I40E_ERR_PARAM;
3716 }
3717
	/* reset the VF in order to deallocate resources */
3719 i40e_vc_notify_vf_reset(vf);
3720 i40e_reset_vf(vf, false);
3721
3722 return I40E_SUCCESS;
3723
3724err:
3725 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
3726 aq_ret);
3727}

/**
 * i40e_vc_process_vf_msg
 * @pf: pointer to the PF structure
 * @vf_id: source VF id
 * @v_opcode: operation code
 * @v_retval: unused return value code
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the common admin queue handler to
 * process a request from a VF
 **/
3741int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
3742 u32 __always_unused v_retval, u8 *msg, u16 msglen)
3743{
3744 struct i40e_hw *hw = &pf->hw;
3745 int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
3746 struct i40e_vf *vf;
3747 int ret;
3748
3749 pf->vf_aq_requests++;
3750 if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs)
3751 return -EINVAL;
3752 vf = &(pf->vf[local_vf_id]);
3753
	/* Check if VF is disabled. */
3755 if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
3756 return I40E_ERR_PARAM;
3757
	/* perform basic checks on the msg */
3759 ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
3760
3761 if (ret) {
3762 i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
3763 dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
3764 local_vf_id, v_opcode, msglen);
3765 switch (ret) {
3766 case VIRTCHNL_STATUS_ERR_PARAM:
3767 return -EPERM;
3768 default:
3769 return -EINVAL;
3770 }
3771 }
3772
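	/* Each handler below validates its own payload and posts its own
	 * virtchnl reply; the ret propagated from here mostly reflects
	 * whether that reply could be sent, not the operation's outcome.
	 */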
3773 switch (v_opcode) {
3774 case VIRTCHNL_OP_VERSION:
3775 ret = i40e_vc_get_version_msg(vf, msg);
3776 break;
3777 case VIRTCHNL_OP_GET_VF_RESOURCES:
3778 ret = i40e_vc_get_vf_resources_msg(vf, msg);
3779 i40e_vc_notify_vf_link_state(vf);
3780 break;
3781 case VIRTCHNL_OP_RESET_VF:
3782 i40e_vc_reset_vf_msg(vf);
3783 ret = 0;
3784 break;
3785 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
3786 ret = i40e_vc_config_promiscuous_mode_msg(vf, msg);
3787 break;
3788 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
3789 ret = i40e_vc_config_queues_msg(vf, msg);
3790 break;
3791 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
3792 ret = i40e_vc_config_irq_map_msg(vf, msg);
3793 break;
3794 case VIRTCHNL_OP_ENABLE_QUEUES:
3795 ret = i40e_vc_enable_queues_msg(vf, msg);
3796 i40e_vc_notify_vf_link_state(vf);
3797 break;
3798 case VIRTCHNL_OP_DISABLE_QUEUES:
3799 ret = i40e_vc_disable_queues_msg(vf, msg);
3800 break;
3801 case VIRTCHNL_OP_ADD_ETH_ADDR:
3802 ret = i40e_vc_add_mac_addr_msg(vf, msg);
3803 break;
3804 case VIRTCHNL_OP_DEL_ETH_ADDR:
3805 ret = i40e_vc_del_mac_addr_msg(vf, msg);
3806 break;
3807 case VIRTCHNL_OP_ADD_VLAN:
3808 ret = i40e_vc_add_vlan_msg(vf, msg);
3809 break;
3810 case VIRTCHNL_OP_DEL_VLAN:
3811 ret = i40e_vc_remove_vlan_msg(vf, msg);
3812 break;
3813 case VIRTCHNL_OP_GET_STATS:
3814 ret = i40e_vc_get_stats_msg(vf, msg);
3815 break;
3816 case VIRTCHNL_OP_IWARP:
3817 ret = i40e_vc_iwarp_msg(vf, msg, msglen);
3818 break;
3819 case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
3820 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, true);
3821 break;
3822 case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
3823 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, false);
3824 break;
3825 case VIRTCHNL_OP_CONFIG_RSS_KEY:
3826 ret = i40e_vc_config_rss_key(vf, msg);
3827 break;
3828 case VIRTCHNL_OP_CONFIG_RSS_LUT:
3829 ret = i40e_vc_config_rss_lut(vf, msg);
3830 break;
3831 case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
3832 ret = i40e_vc_get_rss_hena(vf, msg);
3833 break;
3834 case VIRTCHNL_OP_SET_RSS_HENA:
3835 ret = i40e_vc_set_rss_hena(vf, msg);
3836 break;
3837 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
3838 ret = i40e_vc_enable_vlan_stripping(vf, msg);
3839 break;
3840 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
3841 ret = i40e_vc_disable_vlan_stripping(vf, msg);
3842 break;
3843 case VIRTCHNL_OP_REQUEST_QUEUES:
3844 ret = i40e_vc_request_queues_msg(vf, msg);
3845 break;
3846 case VIRTCHNL_OP_ENABLE_CHANNELS:
3847 ret = i40e_vc_add_qch_msg(vf, msg);
3848 break;
3849 case VIRTCHNL_OP_DISABLE_CHANNELS:
3850 ret = i40e_vc_del_qch_msg(vf, msg);
3851 break;
3852 case VIRTCHNL_OP_ADD_CLOUD_FILTER:
3853 ret = i40e_vc_add_cloud_filter(vf, msg);
3854 break;
3855 case VIRTCHNL_OP_DEL_CLOUD_FILTER:
3856 ret = i40e_vc_del_cloud_filter(vf, msg);
3857 break;
3858 case VIRTCHNL_OP_UNKNOWN:
3859 default:
3860 dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
3861 v_opcode, local_vf_id);
3862 ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
3863 I40E_ERR_NOT_IMPLEMENTED);
3864 break;
3865 }
3866
3867 return ret;
3868}

/**
 * i40e_vc_process_vflr_event
 * @pf: pointer to the PF structure
 *
 * called from the VFLR IRQ handler to
 * free up VF resources and state variables
 **/
3877int i40e_vc_process_vflr_event(struct i40e_pf *pf)
3878{
3879 struct i40e_hw *hw = &pf->hw;
3880 u32 reg, reg_idx, bit_idx;
3881 struct i40e_vf *vf;
3882 int vf_id;
3883
3884 if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
3885 return 0;
3886
	/* Re-enable the VFLR interrupt cause here, before looking for which
	 * VF got reset. Otherwise, if another VF gets a reset while the
	 * first one is being processed, that interrupt will be lost, and
	 * that VF will be stuck in reset forever.
	 */
3892 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3893 reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
3894 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3895 i40e_flush(hw);
3896
3897 clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
3898 for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
3899 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
3900 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
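		/* VFLRSTAT is one bit per VF spread over 32-bit registers.
		 * E.g. with vf_base_id = 64 and vf_id = 5 the absolute id is
		 * 69, which lands in register 69 / 32 = 2, bit 69 % 32 = 5.
		 */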
		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
3902 vf = &pf->vf[vf_id];
3903 reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
3904 if (reg & BIT(bit_idx))
			/* i40e_reset_vf will flush the VFLR bit in VFLRSTAT */
3906 i40e_reset_vf(vf, true);
3907 }
3908
3909 return 0;
3910}
3911
/**
 * i40e_validate_vf
 * @pf: the physical function
 * @vf_id: VF identifier
 *
 * Check that the VF is enabled and the VSI exists.
 *
 * Returns 0 on success, negative on failure.
 **/
3921static int i40e_validate_vf(struct i40e_pf *pf, int vf_id)
3922{
3923 struct i40e_vsi *vsi;
3924 struct i40e_vf *vf;
3925 int ret = 0;
3926
3927 if (vf_id >= pf->num_alloc_vfs) {
3928 dev_err(&pf->pdev->dev,
3929 "Invalid VF Identifier %d\n", vf_id);
3930 ret = -EINVAL;
3931 goto err_out;
3932 }
3933 vf = &pf->vf[vf_id];
3934 vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
3935 if (!vsi)
3936 ret = -EINVAL;
3937err_out:
3938 return ret;
3939}

/**
 * i40e_ndo_set_vf_mac
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @mac: MAC address
 *
 * program VF MAC address
 **/
3949int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
3950{
3951 struct i40e_netdev_priv *np = netdev_priv(netdev);
3952 struct i40e_vsi *vsi = np->vsi;
3953 struct i40e_pf *pf = vsi->back;
3954 struct i40e_mac_filter *f;
3955 struct i40e_vf *vf;
3956 int ret = 0;
3957 struct hlist_node *h;
3958 int bkt;
3959 u8 i;
3960
3961 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
3962 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
3963 return -EAGAIN;
3964 }
3965
	/* validate the request */
3967 ret = i40e_validate_vf(pf, vf_id);
3968 if (ret)
3969 goto error_param;
3970
3971 vf = &pf->vf[vf_id];
3972 vsi = pf->vsi[vf->lan_vsi_idx];
3973
	/* When the VF is resetting wait until it is done.
	 * It can take up to 200 milliseconds, but wait for
	 * up to 300 milliseconds to be safe.
	 * If the VF is indeed in reset, the VSI pointer has
	 * to show on the newly loaded VSI under pf->vsi[id].
	 */
3980 for (i = 0; i < 15; i++) {
3981 if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
3982 if (i > 0)
3983 vsi = pf->vsi[vf->lan_vsi_idx];
3984 break;
3985 }
3986 msleep(20);
3987 }
3988 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
3989 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
3990 vf_id);
3991 ret = -EAGAIN;
3992 goto error_param;
3993 }
3994
3995 if (is_multicast_ether_addr(mac)) {
3996 dev_err(&pf->pdev->dev,
3997 "Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
3998 ret = -EINVAL;
3999 goto error_param;
4000 }

	/* Lock once because the functions invoked below add/del filters and
	 * require mac_filter_hash_lock to be held
	 */
4005 spin_lock_bh(&vsi->mac_filter_hash_lock);
4006
	/* delete the temporary mac address */
4008 if (!is_zero_ether_addr(vf->default_lan_addr.addr))
4009 i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
4010
	/* Delete all the filters for this VSI - we're going to kill it
	 * anyway.
	 */
4014 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
4015 __i40e_del_filter(vsi, f);
4016
4017 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4018
	/* program mac filter */
4020 if (i40e_sync_vsi_filters(vsi)) {
4021 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
4022 ret = -EIO;
4023 goto error_param;
4024 }
4025 ether_addr_copy(vf->default_lan_addr.addr, mac);
4026
4027 if (is_zero_ether_addr(mac)) {
4028 vf->pf_set_mac = false;
4029 dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
4030 } else {
4031 vf->pf_set_mac = true;
4032 dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
4033 mac, vf_id);
4034 }
4035
	/* Force the VF interface down so it has to bring up with the new MAC
	 * address
	 */
4039 i40e_vc_disable_vf(vf);
4040 dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
4041
4042error_param:
4043 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4044 return ret;
4045}

/**
 * i40e_vsi_has_vlans - True if VSI has configured VLANs
 * @vsi: pointer to the VSI
 *
 * Check if a VSI has configured any VLANs. False if we have a port VLAN or if
 * we have no configured VLANs. Do not call while holding the
 * mac_filter_hash_lock.
 */
4055static bool i40e_vsi_has_vlans(struct i40e_vsi *vsi)
4056{
4057 bool have_vlans;
4058
	/* If we have a port VLAN, then the VSI cannot have any VLANs */
4062 if (vsi->info.pvid)
4063 return false;
4064
	/* Since we don't have a PVID, we know that if the device is in VLAN
	 * mode it must be because of a VLAN filter configured on this VSI.
	 */
4068 spin_lock_bh(&vsi->mac_filter_hash_lock);
4069 have_vlans = i40e_is_vsi_in_vlan(vsi);
4070 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4071
4072 return have_vlans;
4073}
4074
/**
 * i40e_ndo_set_vf_port_vlan
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @vlan_id: VLAN ID to set
 * @qos: priority setting
 * @vlan_proto: VLAN protocol
 *
 * program VF VLAN ID and/or QoS
 **/
4085int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
4086 u16 vlan_id, u8 qos, __be16 vlan_proto)
4087{
4088 u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
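	/* 802.1Q TCI layout: the 3-bit priority sits above the 12-bit VLAN
	 * ID (I40E_VLAN_PRIORITY_SHIFT). E.g. vlan_id = 100, qos = 3 encodes
	 * as 100 | (3 << 13) = 0x6064.
	 */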
4089 struct i40e_netdev_priv *np = netdev_priv(netdev);
4090 bool allmulti = false, alluni = false;
4091 struct i40e_pf *pf = np->vsi->back;
4092 struct i40e_vsi *vsi;
4093 struct i40e_vf *vf;
4094 int ret = 0;
4095
4096 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4097 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4098 return -EAGAIN;
4099 }
4100
	/* validate the request */
4102 ret = i40e_validate_vf(pf, vf_id);
4103 if (ret)
4104 goto error_pvid;
4105
4106 if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
4107 dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
4108 ret = -EINVAL;
4109 goto error_pvid;
4110 }
4111
4112 if (vlan_proto != htons(ETH_P_8021Q)) {
4113 dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
4114 ret = -EPROTONOSUPPORT;
4115 goto error_pvid;
4116 }
4117
4118 vf = &pf->vf[vf_id];
4119 vsi = pf->vsi[vf->lan_vsi_idx];
4120 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4121 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4122 vf_id);
4123 ret = -EAGAIN;
4124 goto error_pvid;
4125 }
4126
4127 if (le16_to_cpu(vsi->info.pvid) == vlanprio)
		/* duplicate request, so just return success */
4129 goto error_pvid;
4130
4131 if (i40e_vsi_has_vlans(vsi)) {
4132 dev_err(&pf->pdev->dev,
4133 "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
4134 vf_id);
		/* Administrator error - knock the VF offline until the
		 * network is reconfigured correctly and the VF driver is
		 * reloaded.
		 */
4139 i40e_vc_disable_vf(vf);
		/* During reset the VF got a new VSI, so refresh the pointer. */
4141 vsi = pf->vsi[vf->lan_vsi_idx];
4142 }
4143
	/* Locked once because multiple functions below iterate the list */
4145 spin_lock_bh(&vsi->mac_filter_hash_lock);
4146
	/* Check for the condition where there was already a port VLAN ID
	 * filter set and now it is being deleted by setting it to zero.
	 * Additionally check for the condition where there was a port
	 * VLAN but now there is a new and different port VLAN being set.
	 * Before deleting all the old VLAN filters we must add new ones
	 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
	 * MAC addresses deleted.
	 */
4155 if ((!(vlan_id || qos) ||
4156 vlanprio != le16_to_cpu(vsi->info.pvid)) &&
4157 vsi->info.pvid) {
4158 ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
4159 if (ret) {
4160 dev_info(&vsi->back->pdev->dev,
4161 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4162 vsi->back->hw.aq.asq_last_status);
4163 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4164 goto error_pvid;
4165 }
4166 }
4167
4168 if (vsi->info.pvid) {
		/* remove all filters on the old VLAN */
4170 i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
4171 VLAN_VID_MASK));
4172 }
4173
4174 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4175
	/* disable promisc modes in case they were enabled */
4177 ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id,
4178 allmulti, alluni);
4179 if (ret) {
4180 dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
4181 goto error_pvid;
4182 }
4183
4184 if (vlan_id || qos)
4185 ret = i40e_vsi_add_pvid(vsi, vlanprio);
4186 else
4187 i40e_vsi_remove_pvid(vsi);
4188 spin_lock_bh(&vsi->mac_filter_hash_lock);
4189
4190 if (vlan_id) {
4191 dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
4192 vlan_id, qos, vf_id);
4193
		/* add new VLAN filter for each MAC */
4195 ret = i40e_add_vlan_all_mac(vsi, vlan_id);
4196 if (ret) {
4197 dev_info(&vsi->back->pdev->dev,
4198 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4199 vsi->back->hw.aq.asq_last_status);
4200 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4201 goto error_pvid;
4202 }
4203
		/* remove the previously added non-VLAN MAC filters */
4205 i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
4206 }
4207
4208 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4209
4210 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
4211 alluni = true;
4212
4213 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
4214 allmulti = true;
4215
	/* Schedule the worker thread to take care of applying changes */
4217 i40e_service_event_schedule(vsi->back);
4218
4219 if (ret) {
4220 dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
4221 goto error_pvid;
4222 }
4223
	/* The Port VLAN needs to be saved across resets the same as the
	 * default LAN MAC address.
	 */
4227 vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
4228
4229 ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
4230 if (ret) {
4231 dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n");
4232 goto error_pvid;
4233 }
4234
4235 ret = 0;
4236
4237error_pvid:
4238 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4239 return ret;
4240}

/**
 * i40e_ndo_set_vf_bw
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @min_tx_rate: Minimum Tx rate
 * @max_tx_rate: Maximum Tx rate
 *
 * configure VF Tx rate
 **/
4251int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
4252 int max_tx_rate)
4253{
4254 struct i40e_netdev_priv *np = netdev_priv(netdev);
4255 struct i40e_pf *pf = np->vsi->back;
4256 struct i40e_vsi *vsi;
4257 struct i40e_vf *vf;
4258 int ret = 0;
4259
4260 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4261 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4262 return -EAGAIN;
4263 }
4264
	/* validate the request */
4266 ret = i40e_validate_vf(pf, vf_id);
4267 if (ret)
4268 goto error;
4269
4270 if (min_tx_rate) {
		dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) for VF %d; a nonzero minimum rate is not supported.\n",
			min_tx_rate, vf_id);
4273 ret = -EINVAL;
4274 goto error;
4275 }
4276
4277 vf = &pf->vf[vf_id];
4278 vsi = pf->vsi[vf->lan_vsi_idx];
4279 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4280 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4281 vf_id);
4282 ret = -EAGAIN;
4283 goto error;
4284 }
4285
4286 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
4287 if (ret)
4288 goto error;
4289
4290 vf->tx_rate = max_tx_rate;
4291error:
4292 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4293 return ret;
4294}
4295
/**
 * i40e_ndo_get_vf_config
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ivi: VF configuration structure
 *
 * return VF configuration
 **/
4304int i40e_ndo_get_vf_config(struct net_device *netdev,
4305 int vf_id, struct ifla_vf_info *ivi)
4306{
4307 struct i40e_netdev_priv *np = netdev_priv(netdev);
4308 struct i40e_vsi *vsi = np->vsi;
4309 struct i40e_pf *pf = vsi->back;
4310 struct i40e_vf *vf;
4311 int ret = 0;
4312
4313 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4314 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4315 return -EAGAIN;
4316 }
4317
	/* validate the request */
4319 ret = i40e_validate_vf(pf, vf_id);
4320 if (ret)
4321 goto error_param;
4322
4323 vf = &pf->vf[vf_id];
	/* first vsi is always the LAN vsi */
4325 vsi = pf->vsi[vf->lan_vsi_idx];
4326 if (!vsi) {
4327 ret = -ENOENT;
4328 goto error_param;
4329 }
4330
4331 ivi->vf = vf_id;
4332
4333 ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
4334
4335 ivi->max_tx_rate = vf->tx_rate;
4336 ivi->min_tx_rate = 0;
4337 ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
4338 ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
4339 I40E_VLAN_PRIORITY_SHIFT;
	if (!vf->link_forced)
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->link_up)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
4346 ivi->spoofchk = vf->spoofchk;
4347 ivi->trusted = vf->trusted;
4348 ret = 0;
4349
4350error_param:
4351 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4352 return ret;
4353}
4354
/**
 * i40e_ndo_set_vf_link_state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link: required link state
 *
 * Set the link state of a specified VF, regardless of physical link state
 **/
4363int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
4364{
4365 struct i40e_netdev_priv *np = netdev_priv(netdev);
4366 struct i40e_pf *pf = np->vsi->back;
4367 struct virtchnl_pf_event pfe;
4368 struct i40e_hw *hw = &pf->hw;
4369 struct i40e_vf *vf;
4370 int abs_vf_id;
4371 int ret = 0;
4372
4373 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4374 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4375 return -EAGAIN;
4376 }
4377
	/* validate the request */
4379 if (vf_id >= pf->num_alloc_vfs) {
4380 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4381 ret = -EINVAL;
4382 goto error_out;
4383 }
4384
4385 vf = &pf->vf[vf_id];
4386 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
4387
4388 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
4389 pfe.severity = PF_EVENT_SEVERITY_INFO;
4390
4391 switch (link) {
4392 case IFLA_VF_LINK_STATE_AUTO:
4393 vf->link_forced = false;
4394 pfe.event_data.link_event.link_status =
4395 pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
4396 pfe.event_data.link_event.link_speed =
4397 (enum virtchnl_link_speed)
4398 pf->hw.phy.link_info.link_speed;
4399 break;
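	/* A forced-up link has no PHY state to report, so a fixed 40GB speed
	 * is advertised to the VF below, mirroring what
	 * i40e_vc_notify_vf_link_state() does for forced links.
	 */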
4400 case IFLA_VF_LINK_STATE_ENABLE:
4401 vf->link_forced = true;
4402 vf->link_up = true;
4403 pfe.event_data.link_event.link_status = true;
4404 pfe.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
4405 break;
4406 case IFLA_VF_LINK_STATE_DISABLE:
4407 vf->link_forced = true;
4408 vf->link_up = false;
4409 pfe.event_data.link_event.link_status = false;
4410 pfe.event_data.link_event.link_speed = 0;
4411 break;
4412 default:
4413 ret = -EINVAL;
4414 goto error_out;
4415 }
	/* Notify the VF of its new link state */
4417 i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
4418 0, (u8 *)&pfe, sizeof(pfe), NULL);
4419
4420error_out:
4421 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4422 return ret;
4423}
4424
/**
 * i40e_ndo_set_vf_spoofchk
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @enable: flag to enable or disable feature
 *
 * Enable or disable VF spoof checking
 **/
4433int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
4434{
4435 struct i40e_netdev_priv *np = netdev_priv(netdev);
4436 struct i40e_vsi *vsi = np->vsi;
4437 struct i40e_pf *pf = vsi->back;
4438 struct i40e_vsi_context ctxt;
4439 struct i40e_hw *hw = &pf->hw;
4440 struct i40e_vf *vf;
4441 int ret = 0;
4442
4443 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4444 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4445 return -EAGAIN;
4446 }
4447
	/* validate the request */
4449 if (vf_id >= pf->num_alloc_vfs) {
4450 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4451 ret = -EINVAL;
4452 goto out;
4453 }
4454
4455 vf = &(pf->vf[vf_id]);
4456 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4457 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4458 vf_id);
4459 ret = -EAGAIN;
4460 goto out;
4461 }
4462
4463 if (enable == vf->spoofchk)
4464 goto out;
4465
4466 vf->spoofchk = enable;
4467 memset(&ctxt, 0, sizeof(ctxt));
4468 ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
4469 ctxt.pf_num = pf->hw.pf_id;
4470 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
4471 if (enable)
4472 ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
4473 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
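	/* These sec_flags ask the hardware to drop Tx frames whose source
	 * MAC or VLAN doesn't match what the PF programmed for this VSI.
	 * When disabling, sec_flags is simply left zeroed (from the memset
	 * above) and the same VSI update clears both checks.
	 */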
4474 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4475 if (ret) {
4476 dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
4477 ret);
4478 ret = -EIO;
4479 }
4480out:
4481 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4482 return ret;
4483}

/**
 * i40e_ndo_set_vf_trust
 * @netdev: network interface device structure of the PF
 * @vf_id: VF identifier
 * @setting: trust setting
 *
 * Enable or disable VF trust setting
 **/
4493int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
4494{
4495 struct i40e_netdev_priv *np = netdev_priv(netdev);
4496 struct i40e_pf *pf = np->vsi->back;
4497 struct i40e_vf *vf;
4498 int ret = 0;
4499
4500 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4501 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4502 return -EAGAIN;
4503 }
4504
	/* validate the request */
4506 if (vf_id >= pf->num_alloc_vfs) {
4507 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4508 ret = -EINVAL;
4509 goto out;
4510 }
4511
4512 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
4513 dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
4514 ret = -EINVAL;
4515 goto out;
4516 }
4517
4518 vf = &pf->vf[vf_id];
4519
4520 if (setting == vf->trusted)
4521 goto out;
4522
4523 vf->trusted = setting;
4524 i40e_vc_disable_vf(vf);
4525 dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
4526 vf_id, setting ? "" : "un");
4527
4528 if (vf->adq_enabled) {
4529 if (!vf->trusted) {
4530 dev_info(&pf->pdev->dev,
4531 "VF %u no longer Trusted, deleting all cloud filters\n",
4532 vf_id);
4533 i40e_del_all_cloud_filters(vf);
4534 }
4535 }
4536
4537out:
4538 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4539 return ret;
4540}

/**
 * i40e_get_vf_stats - populate some stats for the VF
 * @netdev: the netdev of the PF
 * @vf_id: the host OS identifier (0-127)
 * @vf_stats: pointer to the OS memory to be initialized
 */
4548int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
4549 struct ifla_vf_stats *vf_stats)
4550{
4551 struct i40e_netdev_priv *np = netdev_priv(netdev);
4552 struct i40e_pf *pf = np->vsi->back;
4553 struct i40e_eth_stats *stats;
4554 struct i40e_vsi *vsi;
4555 struct i40e_vf *vf;
4556
	/* validate the request */
4558 if (i40e_validate_vf(pf, vf_id))
4559 return -EINVAL;
4560
4561 vf = &pf->vf[vf_id];
4562 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4563 dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
4564 return -EBUSY;
4565 }
4566
4567 vsi = pf->vsi[vf->lan_vsi_idx];
4568 if (!vsi)
4569 return -EINVAL;
4570
4571 i40e_update_eth_stats(vsi);
4572 stats = &vsi->eth_stats;
4573
4574 memset(vf_stats, 0, sizeof(*vf_stats));
4575
4576 vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
4577 stats->rx_multicast;
4578 vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
4579 stats->tx_multicast;
4580 vf_stats->rx_bytes = stats->rx_bytes;
4581 vf_stats->tx_bytes = stats->tx_bytes;
4582 vf_stats->broadcast = stats->rx_broadcast;
4583 vf_stats->multicast = stats->rx_multicast;
4584 vf_stats->rx_dropped = stats->rx_discards;
4585 vf_stats->tx_dropped = stats->tx_discards;
4586
4587 return 0;
4588}
4589