// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "i40e.h"

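/*********************notification routines***********************/

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/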
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
				 enum virtchnl_ops v_opcode,
				 i40e_status v_retval, u8 *msg,
				 u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
		int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;

		/* Not all vfs are enabled so skip the ones that are not */
		if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
		    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
	}
}

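/**
 * i40e_vc_notify_vf_link_state
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 **/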
static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;
	if (vf->link_forced) {
		pfe.event_data.link_event.link_status = vf->link_up;
		pfe.event_data.link_event.link_speed =
			(vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0);
	} else {
		pfe.event_data.link_event.link_status =
			ls->link_info & I40E_AQ_LINK_UP;
		pfe.event_data.link_event.link_speed =
			i40e_virtchnl_link_speed(ls->link_speed);
	}
	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);
}

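/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the PF structure
 *
 * send a link status message to all VFs on a given PF
 **/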
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++)
		i40e_vc_notify_vf_link_state(&pf->vf[i]);
}

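/**
 * i40e_vc_notify_reset
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/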
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
	struct virtchnl_pf_event pfe;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
			     (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

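/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the VF structure
 *
 * indicate a pending reset to the given VF
 **/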
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	int abs_vf_id;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return;

	/* verify if the VF is in either init or active before proceeding */
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
	    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		return;

	abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe,
			       sizeof(struct virtchnl_pf_event), NULL);
}

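/**
 * i40e_vc_disable_vf
 * @vf: pointer to the VF info
 *
 * Disable the VF through a SW reset.
 **/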
static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
{
	int i;

	i40e_vc_notify_vf_reset(vf);

	/* We want to guarantee that the VF is actually reset, but we don't
	 * want to spin forever if another reset is already in progress
	 * (i40e_reset_vf() returns false in that case), so retry for up to
	 * 200 ms before giving up.
	 */
	for (i = 0; i < 20; i++) {
		if (i40e_reset_vf(vf, false))
			return;
		usleep_range(10000, 20000);
	}

	dev_warn(&vf->pf->pdev->dev,
		 "Failed to initiate reset for VF %d after 200 milliseconds\n",
		 vf->vf_id);
}

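/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI id
 *
 * check for the valid VSI id
 **/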
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (vsi->vf_id == vf->vf_id));
}

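/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/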
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
					    u8 qid)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (qid < vsi->alloc_queue_pairs));
}

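/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the VF info
 * @vector_id: VF relative vector id
 *
 * check for the valid vector id
 **/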
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}

/************************vf resource mgmt routines*****************/

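/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue id
 *
 * return PF relative queue id
 **/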
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
				   u8 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

	if (!vsi)
		return pf_queue_id;

	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}

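/**
 * i40e_get_real_pf_qid
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @queue_id: queue number
 *
 * wrapper function to get pf_queue_id handling ADq code as well
 **/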
static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
{
	int i;

	if (vf->adq_enabled) {
		/* Although the VF considers all the queues (up to 16) as its
		 * own, they may actually belong to different VSIs (up to 4
		 * separate channels). So locate the right VSI from the queue.
		 */
		for (i = 0; i < vf->num_tc; i++) {
			if (queue_id < vf->ch[i].num_qps) {
				vsi_id = vf->ch[i].vsi_id;
				break;
			}
			/* find the right queue id which is relative to a
			 * given VSI.
			 */
			queue_id -= vf->ch[i].num_qps;
		}
	}

	return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
}

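/**
 * i40e_config_irq_link_list
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as given by the FW
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/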
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
				      struct virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id, size;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
		     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
		     (vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
	tempmap = vecmap->rxq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id));
	}

	tempmap = vecmap->txq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id + 1));
	}

	size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
	next_q = find_first_bit(&linklistmap, size);
	if (unlikely(next_q == size))
		goto irq_list_done;

	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	while (next_q < size) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap, size, next_q + 1);
		if (next_q < size) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_get_real_pf_qid(vf,
							   vsi_id,
							   vsi_queue_id);
		} else {
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is same */
		reg = (vector_id) |
		    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		    BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

	/* if the vf is running in polling mode and using interrupt zero,
	 * need to disable auto-mask on enabling zero interrupt for VFs.
	 */
	if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
	    (vector_id == 0)) {
		reg = rd32(hw, I40E_GLINT_CTL);
		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
			wr32(hw, I40E_GLINT_CTL, reg);
		}
	}

irq_list_done:
	i40e_flush(hw);
}

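/**
 * i40e_release_iwarp_qvlist
 * @vf: pointer to the VF.
 *
 **/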
static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
	u32 msix_vf;
	u32 i;

	if (!vf->qvlist_info)
		return;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		struct virtchnl_iwarp_qv_info *qv_info;
		u32 next_q_index, next_q_type;
		struct i40e_hw *hw = &pf->hw;
		u32 v_idx, reg_idx, reg;

		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;
		v_idx = qv_info->v_idx;
		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			/* Figure out the queue after CEQ and make that the
			 * first queue.
			 */
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
			next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
			next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (next_q_index &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (next_q_type <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}
	}
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
}

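/**
 * i40e_config_iwarp_qvlist
 * @vf: pointer to the VF info
 * @qvlist_info: queue and vector list
 *
 * Return 0 on success or < 0 on error
 **/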
static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
				    struct virtchnl_iwarp_qvlist_info *qvlist_info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct virtchnl_iwarp_qv_info *qv_info;
	u32 v_idx, i, reg_idx, reg;
	u32 next_q_idx, next_q_type;
	u32 msix_vf, size;

	size = sizeof(struct virtchnl_iwarp_qvlist_info) +
	       (sizeof(struct virtchnl_iwarp_qv_info) *
						(qvlist_info->num_vectors - 1));
	vf->qvlist_info = kzalloc(size, GFP_KERNEL);
	if (!vf->qvlist_info)
		return -ENOMEM;

	vf->qvlist_info->num_vectors = qvlist_info->num_vectors;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;
		v_idx = qv_info->v_idx;

		/* Validate vector id belongs to this vf */
		if (!i40e_vc_isvalid_vector_id(vf, v_idx))
			goto err;

		vf->qvlist_info->qv_info[i] = *qv_info;

		reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
		/* We might be sharing the interrupt, so get the first queue
		 * index and type, push it down the list by adding the new
		 * queue on top of it.
		 */
		reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
		next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
				I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
		next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
				I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
			(v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
			(qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
			(next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
			(next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
			wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (qv_info->ceq_idx &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (I40E_QUEUE_TYPE_PE_CEQ <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}

		if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
			reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
			(v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
			(qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));

			wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
		}
	}

	return 0;
err:
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
	return -EINVAL;
}

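/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/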
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	struct i40e_vsi *vsi;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		ret = -ENOENT;
		goto error_context;
	}
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!vsi) {
		ret = -ENOENT;
		goto error_context;
	}

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
	tx_ctx.base = info->dma_ring_addr / 128;
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;
	tx_ctx.head_wb_ena = info->headwb_enabled;
	tx_ctx.head_wb_addr = info->dma_headwb_addr;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}

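/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/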
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_rxq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	u16 pf_queue_id;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128;
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set split mode 10b */
		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.lrxqthresh = 1;
	rx_ctx.crcstrip = 1;
	rx_ctx.prefena = 1;
	rx_ctx.l2tsel = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}

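/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the VF info
 * @idx: VSI index, applies only for ADq mode, zero otherwise
 *
 * alloc VF vsi context & resources
 **/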
static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	u64 max_tx_rate = 0;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
			     vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for VF %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}

	if (!idx) {
		u64 hena = i40e_pf_get_default_rss_hena(pf);
		u8 broadcast[ETH_ALEN];

		vf->lan_vsi_idx = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		/* If the port VLAN has been configured and then the
		 * VF driver was removed then the VSI port VLAN
		 * configuration was destroyed.  Check if there is
		 * a port VLAN and restore the VSI configuration if
		 * needed.
		 */
		if (vf->port_vlan_id)
			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
			f = i40e_add_mac_filter(vsi,
						vf->default_lan_addr.addr);
			if (!f)
				dev_info(&pf->pdev->dev,
					 "Could not add MAC filter %pM for VF %d\n",
					 vf->default_lan_addr.addr, vf->vf_id);
		}
		eth_broadcast_addr(broadcast);
		f = i40e_add_mac_filter(vsi, broadcast);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF broadcast filter\n");
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
		/* program mac filter */
		ret = i40e_sync_vsi_filters(vsi);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
	}

	/* storing VSI index and id for ADq */
	if (vf->adq_enabled) {
		vf->ch[idx].vsi_idx = vsi->idx;
		vf->ch[idx].vsi_id = vsi->id;
	}

	/* Set VF bandwidth if specified */
	if (vf->tx_rate) {
		max_tx_rate = vf->tx_rate;
	} else if (vf->ch[idx].max_tx_rate) {
		max_tx_rate = vf->ch[idx].max_tx_rate;
	}

	if (max_tx_rate) {
		max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
						  max_tx_rate, 0, NULL);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
				vf->vf_id, ret);
	}

error_alloc_vsi_res:
	return ret;
}

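/**
 * i40e_map_pf_queues_to_vsi
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of first part VSILAN_QTABLE, mapping pf queues to VSI.
 **/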
static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qps;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < 7; j++) {
			if (j * 2 >= qps) {
				/* end of list */
				reg = 0x07FF07FF;
			} else {
				u16 qid = i40e_vc_get_pf_queue_id(vf,
								  vsi_id,
								  j * 2);
				reg = qid;
				qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
							      (j * 2) + 1);
				reg |= qid << 16;
			}
			i40e_write_rx_ctl(hw,
					  I40E_VSILAN_QTABLE(j, vsi_id),
					  reg);
		}
	}
}

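/**
 * i40e_map_pf_to_vf_queues
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of the second part VPLAN_QTABLE & completes VF mappings.
 **/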
static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_qps = 0;
	u32 qps, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qid;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < qps; j++) {
			qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);

			reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
			wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
			     reg);
			total_qps++;
		}
	}
}

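/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the VF info
 *
 * enable VF mappings
 **/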
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	i40e_map_pf_to_vf_queues(vf);
	i40e_map_pf_queues_to_vsi(vf);

	i40e_flush(hw);
}

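/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the VF info
 *
 * disable VF mappings
 **/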
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}

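/**
 * i40e_free_vf_res
 * @vf: pointer to the VF info
 *
 * free VF resources
 **/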
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, j, msix_vf;

	/* Start by disabling VF's configuration API to prevent the OS from
	 * accessing the VF's VSI after it's freed / invalidated.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* It's possible the VF had requested more queues than the default so
	 * do the accounting here when we're about to free them.
	 */
	if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
		pf->queues_left += vf->num_queue_pairs -
				   I40E_DEFAULT_QUEUES_PER_VF;
	}

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_id = 0;
		vf->num_mac = 0;
	}

	/* do the accounting and remove additional ADq VSI's */
	if (vf->adq_enabled && vf->ch[0].vsi_idx) {
		for (j = 0; j < vf->num_tc; j++) {
			/* At this point VSI0 is already released so don't
			 * release it again and only clear their values in
			 * program memory
			 */
			if (j)
				i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
			vf->ch[j].vsi_idx = 0;
			vf->ch[j].vsi_id = 0;
		}
	}
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						     (vf->vf_id))
						     + (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}

	/* reset some of the state variables keeping track of the resources */
	vf->num_queue_pairs = 0;
	clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
	clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
}

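/**
 * i40e_alloc_vf_res
 * @vf: pointer to the VF info
 *
 * allocate VF resources
 **/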
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret, idx;

	if (vf->num_req_queues &&
	    vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
		pf->num_vf_qps = vf->num_req_queues;
	else
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, 0);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;

	/* allocate additional VSIs based on tc information for ADq */
	if (vf->adq_enabled) {
		if (pf->queues_left >=
		    (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
			/* TC 0 always belongs to VF VSI */
			for (idx = 1; idx < vf->num_tc; idx++) {
				ret = i40e_alloc_vsi_res(vf, idx);
				if (ret)
					goto error_alloc;
			}
			/* send correct number of queues */
			total_queue_pairs = I40E_MAX_VF_QUEUES;
		} else {
			dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
				 vf->vf_id);
			vf->adq_enabled = false;
		}
	}

	/* We account for each VF to get a default number of queue pairs.  If
	 * the VF has now requested more, we need to account for that to make
	 * certain we never request more queues than we actually have left in
	 * available resources.
	 */
	if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
		pf->queues_left -=
			total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;

	if (vf->trusted)
		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for later use, when implementing
	 * the feature
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* VF is now completely initialized */
	set_bit(I40E_VF_STATE_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}

#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_MASK 0x20

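/**
 * i40e_quiesce_vf_pci
 * @vf: pointer to the VF structure
 *
 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 * if the transactions never clear.
 **/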
static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int vf_abs_id, i;
	u32 reg;

	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	wr32(hw, I40E_PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_PF_PCI_CIAD);
		if ((reg & VF_TRANS_PENDING_MASK) == 0)
			return 0;
		udelay(1);
	}
	return -EIO;
}

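/**
 * i40e_trigger_vf_reset
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 **/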
static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, bit_idx;

	/* warn the VF */
	clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
	 * It's normally disabled in i40e_free_vf_res(), but it's safer
	 * to do it earlier to give some time to finish to any VF config
	 * functions that may still be running at this point.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!flr) {
		/* reset VF using VPGEN_VFRTRIG reg */
		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
		i40e_flush(hw);
	}
	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
	bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
	wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	i40e_flush(hw);

	if (i40e_quiesce_vf_pci(vf))
		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
			vf->vf_id);
}

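/**
 * i40e_cleanup_reset_vf
 * @vf: pointer to the VF structure
 *
 * Cleanup a VF after the hardware reset is finished. Expects the caller to
 * have verified whether the reset is finished properly, and ensure the
 * minimum amount of wait time has passed.
 **/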
static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* free VF resources to begin resetting the VSI state */
	i40e_free_vf_res(vf);

	/* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
	 * By doing this we allow HW to access VF memory at any point. If we
	 * did it any sooner, HW could access memory while it was being freed
	 * in i40e_free_vf_res(), causing an IOMMU fault.
	 *
	 * On the other hand, this needs to be done ASAP, because the VF driver
	 * is waiting for this to happen and may report a timeout. It's
	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
	 * it.
	 */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

	/* reallocate VF resources to finish resetting the VSI state */
	if (!i40e_alloc_vf_res(vf)) {
		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
		i40e_enable_vf_mappings(vf);
		set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
		clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		/* Do not notify the client during VF init */
		if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
					&vf->vf_states))
			i40e_notify_client_of_vf_reset(pf, abs_vf_id);
		vf->num_vlan = 0;
	}

	/* Tell the VF driver the reset is done. This needs to be done only
	 * after VF has been fully initialized, because the VF driver may
	 * request resources immediately after setting this flag.
	 */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

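/**
 * i40e_reset_vf
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Returns true if the VF is reset, false otherwise.
 **/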
bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool rsd = false;
	u32 reg;
	int i;

	/* If the VFs have been disabled, this means something else is
	 * resetting the VF, so we shouldn't continue.
	 */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return false;

	i40e_trigger_vf_reset(vf, flr);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully. Since reset can take a long
		 * time to complete, wait first and then poll the status
		 * register.
		 */
		usleep_range(10000, 20000);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			vf->vf_id);
	usleep_range(10000, 20000);

	/* On initial reset, we don't have any queues to disable */
	if (vf->lan_vsi_idx != 0)
		i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);

	i40e_cleanup_reset_vf(vf);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}

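/**
 * i40e_reset_all_vfs
 * @pf: pointer to the PF structure
 * @flr: VFLR was issued or not
 *
 * Reset all allocated VFs in one go. First, tell the hardware to reset each
 * VF, then do all the waiting in one chunk, and finally finish restoring each
 * VF after the wait. This is useful during PF routines which need to reset
 * all VFs, as otherwise it must perform these resets in a serialized fashion.
 **/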
bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int i, v;
	u32 reg;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!pf->num_alloc_vfs)
		return false;

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return false;

	/* Begin reset on all VFs at once */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_trigger_vf_reset(&pf->vf[v], flr);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
		usleep_range(10000, 20000);

		/* Check each VF in sequence; if the VPGEN_VFRSTAT bit is not
		 * set, the VF is not yet reset, so sleep and check again.
		 */
		while (v < pf->num_alloc_vfs) {
			vf = &pf->vf[v];
			reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
			if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
				break;

			/* If the current VF has finished resetting, move on
			 * to the next VF in sequence.
			 */
			v++;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	/* Display a warning if at least one VF didn't manage to reset in
	 * time, but continue on with the operation.
	 */
	if (v < pf->num_alloc_vfs)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			pf->vf[v].vf_id);
	usleep_range(10000, 20000);

	/* Begin disabling all the rings associated with VFs, but do not wait
	 * between each VF.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* Now that we've notified HW to disable all of the VF rings, wait
	 * until they finish.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* Hw may need up to 50ms to finish disabling the RX queues. We
	 * minimize the wait by delaying only once for all VFs.
	 */
	mdelay(50);

	/* Finish the reset on each VF */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_cleanup_reset_vf(&pf->vf[v]);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}

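/**
 * i40e_free_vfs
 * @pf: pointer to the PF structure
 *
 * free VF resources
 **/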
void i40e_free_vfs(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, bit_idx;
	int i, tmp, vf_id;

	if (!pf->vf)
		return;
	while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		usleep_range(1000, 2000);

	i40e_notify_client_of_vf_enable(pf, 0);

	/* Amortize wait time by stopping all VFs at the same time */
	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");

	/* free up VF resources */
	tmp = pf->num_alloc_vfs;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			i40e_free_vf_res(&pf->vf[i]);
		/* disable qp mappings */
		i40e_disable_vf_mappings(&pf->vf[i]);
	}

	kfree(pf->vf);
	pf->vf = NULL;

	/* This check is for when the driver is unloaded while VFs are
	 * assigned. Setting the number of VFs to 0 through sysfs is caught
	 * before this function ever gets called.
	 */
	if (!pci_vfs_assigned(pf->pdev)) {
		/* Acknowledge VFLR for all VFS. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}
	}
	clear_bit(__I40E_VF_DISABLE, pf->state);
}

#ifdef CONFIG_PCI_IOV
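/**
 * i40e_alloc_vfs
 * @pf: pointer to the PF structure
 * @num_alloc_vfs: number of VFs to allocate
 *
 * allocate VF resources
 **/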
int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
	struct i40e_vf *vfs;
	int i, ret = 0;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	i40e_irq_dynamic_disable_icr0(pf);

	/* Check to see if we're just allocating resources for extant VFs */
	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
		if (ret) {
			pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			pf->num_alloc_vfs = 0;
			goto err_iov;
		}
	}
	/* allocate memory */
	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	pf->vf = vfs;

	/* apply default profile */
	for (i = 0; i < num_alloc_vfs; i++) {
		vfs[i].pf = pf;
		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
		vfs[i].spoofchk = true;

		set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
	}
	pf->num_alloc_vfs = num_alloc_vfs;

	/* VF resources get allocated during reset */
	i40e_reset_all_vfs(pf, false);

	i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);

err_alloc:
	if (ret)
		i40e_free_vfs(pf);
err_iov:
	/* Re-enable interrupt 0. */
	i40e_irq_dynamic_enable_icr0(pf);
	return ret;
}

#endif

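/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs
 **/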
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	if (test_bit(__I40E_TESTING, pf->state)) {
		dev_warn(&pdev->dev,
			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
		err = -EPERM;
		goto err_out;
	}

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	if (num_vfs > pf->num_req_vfs) {
		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
			 num_vfs, pf->num_req_vfs);
		err = -EPERM;
		goto err_out;
	}

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	return 0;
}

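/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/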
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	if (num_vfs) {
		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
			i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
		}
		return i40e_pci_sriov_enable(pdev, num_vfs);
	}

	if (!pci_vfs_assigned(pf->pdev)) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
		i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
	} else {
		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
		return -EINVAL;
	}
	return 0;
}

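/***********************virtual channel routines******************/

/**
 * i40e_vc_send_msg_to_vf
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to VF
 **/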
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
				  u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	int abs_vf_id;
	i40e_status aq_ret;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return -EINVAL;

	pf = vf->pf;
	hw = &pf->hw;
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* single place to detect unsuccessful return values */
	if (v_retval) {
		vf->num_invalid_msgs++;
		dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
			 vf->vf_id, v_opcode, v_retval);
		if (vf->num_invalid_msgs >
		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
			dev_err(&pf->pdev->dev,
				"Number of invalid messages exceeded for VF %d\n",
				vf->vf_id);
			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		}
	} else {
		vf->num_valid_msgs++;
		/* reset the invalid counter, if a valid message is received. */
		vf->num_invalid_msgs = 0;
	}

	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
					msg, msglen, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "Unable to send the message to VF %d aq_err %d\n",
			 vf->vf_id, pf->hw.aq.asq_last_status);
		return -EIO;
	}

	return 0;
}

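/**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the VF info
 * @opcode: operation code
 * @retval: return value
 *
 * send resp msg to VF
 **/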
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
				   enum virtchnl_ops opcode,
				   i40e_status retval)
{
	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}

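/**
 * i40e_vc_get_version_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request the API version used by the PF
 **/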
static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_version_info info = {
		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
	};

	vf->vf_ver = *(struct virtchnl_version_info *)msg;
	/* VFs running the 1.0 API expect to get 1.0 back */
	if (VF_IS_V10(&vf->vf_ver))
		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
				      I40E_SUCCESS, (u8 *)&info,
				      sizeof(struct virtchnl_version_info));
}

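/**
 * i40e_del_qch - delete all the additional VSIs created as a part of ADq
 * @vf: pointer to VF structure
 **/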
static void i40e_del_qch(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int i;

	/* first element in the array belongs to primary VF VSI and we shouldn't
	 * delete it. We should however delete the rest of the VSIs created
	 */
	for (i = 1; i < vf->num_tc; i++) {
		if (vf->ch[i].vsi_idx) {
			i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
			vf->ch[i].vsi_idx = 0;
			vf->ch[i].vsi_id = 0;
		}
	}
}

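/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request its resources
 **/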
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_vf_resource *vfres = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int num_vsis = 1;
	int len = 0;
	int ret;

	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	len = (sizeof(struct virtchnl_vf_resource) +
	       sizeof(struct virtchnl_vsi_resource) * num_vsis);

	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		aq_ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}
	if (VF_IS_V11(&vf->vf_ver))
		vf->driver_caps = *(u32 *)msg;
	else
		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
				  VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi->info.pvid)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;

	if (i40e_vf_client_capable(pf, vf->vf_id) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
		set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
	} else {
		clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
	} else {
		if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
		    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
		else
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
	}

	if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			vfres->vf_cap_flags |=
				VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;

	if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
		if (pf->flags & I40E_FLAG_MFP_ENABLED) {
			dev_err(&pf->pdev->dev,
				"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
				vf->vf_id);
			aq_ret = I40E_ERR_PARAM;
			goto err;
		}
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
	}

	if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
			vfres->vf_cap_flags |=
					VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ;

	vfres->num_vsis = num_vsis;
	vfres->num_queue_pairs = vf->num_queue_pairs;
	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
	vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;

	if (vf->lan_vsi_idx) {
		vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
		vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
		vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
		/* VFs only use TC 0 */
		vfres->vsi_res[0].qset_handle
					  = le16_to_cpu(vsi->info.qs_handle[0]);
		ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
				vf->default_lan_addr.addr);
	}
	set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

err:
	/* send the response back to the VF */
	ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
				     aq_ret, (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}

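/**
 * i40e_vc_reset_vf_msg
 * @vf: pointer to the VF info
 *
 * called from the VF to reset itself,
 * unlike other virtchnl messages, PF driver
 * doesn't send the response back to the VF
 **/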
static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
{
	if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		i40e_reset_vf(vf, false);
}

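/**
 * i40e_getnum_vf_vsi_vlan_filters
 * @vsi: pointer to the vsi
 *
 * called to get the number of VLANs offloaded on this VF
 **/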
static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	int num_vlans = 0, bkt;

	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
			num_vlans++;
	}

	return num_vlans;
}

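/**
 * i40e_vc_config_promiscuous_mode_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to configure the promiscuous mode of
 * VF vsis
 **/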
static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
					       u8 *msg, u16 msglen)
{
	struct virtchnl_promisc_info *info =
	    (struct virtchnl_promisc_info *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_mac_filter *f;
	i40e_status aq_ret = 0;
	bool allmulti = false;
	struct i40e_vsi *vsi;
	bool alluni = false;
	int aq_err = 0;
	int bkt;

	vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
	    !vsi) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
		dev_err(&pf->pdev->dev,
			"Unprivileged VF %d is attempting to configure promiscuous mode\n",
			vf->vf_id);
		/* Lie to the VF on purpose. */
		aq_ret = 0;
		goto error_param;
	}
	/* Multicast promiscuous handling */
	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
		allmulti = true;

	if (vf->port_vlan_id) {
		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
							    allmulti,
							    vf->port_vlan_id,
							    NULL);
	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
		hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
			if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
				continue;
			aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
								    vsi->seid,
								    allmulti,
								    f->vlan,
								    NULL);
			aq_err = pf->hw.aq.asq_last_status;
			if (aq_ret) {
				dev_err(&pf->pdev->dev,
					"Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
					f->vlan,
					i40e_stat_str(&pf->hw, aq_ret),
					i40e_aq_str(&pf->hw, aq_err));
				break;
			}
		}
	} else {
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
							       allmulti, NULL);
		aq_err = pf->hw.aq.asq_last_status;
		if (aq_ret) {
			dev_err(&pf->pdev->dev,
				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
			goto error_param;
		}
	}

	if (!aq_ret) {
		dev_info(&pf->pdev->dev,
			 "VF %d successfully set multicast promiscuous mode\n",
			 vf->vf_id);
		if (allmulti)
			set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
		else
			clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
	}

	if (info->flags & FLAG_VF_UNICAST_PROMISC)
		alluni = true;
	if (vf->port_vlan_id) {
		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
							    alluni,
							    vf->port_vlan_id,
							    NULL);
	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
		hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
			if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
				continue;
			aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
								    vsi->seid,
								    alluni,
								    f->vlan,
								    NULL);
			aq_err = pf->hw.aq.asq_last_status;
			if (aq_ret)
				dev_err(&pf->pdev->dev,
					"Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
					f->vlan,
					i40e_stat_str(&pf->hw, aq_ret),
					i40e_aq_str(&pf->hw, aq_err));
		}
	} else {
		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
							     alluni, NULL,
							     true);
		aq_err = pf->hw.aq.asq_last_status;
		if (aq_ret) {
			dev_err(&pf->pdev->dev,
				"VF %d failed to set unicast promiscuous mode %8.8x err %s aq_err %s\n",
				vf->vf_id, info->flags,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
			goto error_param;
		}
	}

	if (!aq_ret) {
		dev_info(&pf->pdev->dev,
			 "VF %d successfully set unicast promiscuous mode\n",
			 vf->vf_id);
		if (alluni)
			set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
		else
			clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf,
				       VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
				       aq_ret);
}

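/**
 * i40e_vc_config_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to configure the rx/tx
 * queues
 **/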
static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_vsi_queue_config_info *qci =
	    (struct virtchnl_vsi_queue_config_info *)msg;
	struct virtchnl_queue_pair_info *qpi;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id, vsi_queue_id = 0;
	i40e_status aq_ret = 0;
	int i, j = 0, idx = 0;

	vsi_id = qci->vsi_id;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < qci->num_queue_pairs; i++) {
		qpi = &qci->qpair[i];

		if (!vf->adq_enabled) {
			vsi_queue_id = qpi->txq.queue_id;

			if (qpi->txq.vsi_id != qci->vsi_id ||
			    qpi->rxq.vsi_id != qci->vsi_id ||
			    qpi->rxq.queue_id != vsi_queue_id) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
		}

		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->rxq) ||
		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->txq)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		/* For ADq there can be up to 4 VSIs with max 4 queues each.
		 * VF does not know about these additional VSIs and all
		 * it cares is about its own queues. PF configures these queues
		 * to its appropriate VSIs based on TC mapping
		 **/
		if (vf->adq_enabled) {
			if (j == (vf->ch[idx].num_qps - 1)) {
				idx++;
				j = 0; /* resetting the queue count */
				vsi_queue_id = 0;
			} else {
				j++;
				vsi_queue_id++;
			}
			vsi_id = vf->ch[idx].vsi_id;
		}
	}
	/* set vsi num_queue_pairs in use to num configured by VF */
	if (!vf->adq_enabled) {
		pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
			qci->num_queue_pairs;
	} else {
		for (i = 0; i < vf->num_tc; i++)
			pf->vsi[vf->ch[i].vsi_idx]->num_queue_pairs =
			       vf->ch[i].num_qps;
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
				       aq_ret);
}

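/**
 * i40e_validate_queue_map - check queue map is valid
 * @vf: the VF structure pointer
 * @vsi_id: vsi id
 * @queuemap: Tx or Rx queue map
 *
 * check if Tx or Rx queue map is valid
 **/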
static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
				   unsigned long queuemap)
{
	u16 vsi_queue_id, queue_id;

	for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
		if (vf->adq_enabled) {
			vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
			queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
		} else {
			queue_id = vsi_queue_id;
		}

		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id))
			return -EINVAL;
	}

	return 0;
}

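/**
 * i40e_vc_config_irq_map_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to configure the irq to
 * queue map
 **/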
static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_irq_map_info *irqmap_info =
	    (struct virtchnl_irq_map_info *)msg;
	struct virtchnl_vector_map *map;
	u16 vsi_id, vector_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < irqmap_info->num_vectors; i++) {
		map = &irqmap_info->vecmap[i];
		vector_id = map->vector_id;
		vsi_id = map->vsi_id;
		/* validate msg params */
		if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
		    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		i40e_config_irq_link_list(vf, vsi_id, map);
	}
error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
				       aq_ret);
}

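/**
 * i40e_ctrl_vf_tx_rings
 * @vsi: the SRIOV VSI being configured
 * @q_map: bit map of the queues to be enabled
 * @enable: start or stop the queue
 **/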
static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map,
				 bool enable)
{
	struct i40e_pf *pf = vsi->back;
	int ret = 0;
	u16 q_id;

	for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
		ret = i40e_control_wait_tx_q(vsi->seid, pf,
					     vsi->base_queue + q_id,
					     false /*is xdp*/, enable);
		if (ret)
			break;
	}
	return ret;
}

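/**
 * i40e_ctrl_vf_rx_rings
 * @vsi: the SRIOV VSI being configured
 * @q_map: bit map of the queues to be enabled
 * @enable: start or stop the queue
 **/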
static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
				 bool enable)
{
	struct i40e_pf *pf = vsi->back;
	int ret = 0;
	u16 q_id;

	for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
		ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id,
					     enable);
		if (ret)
			break;
	}
	return ret;
}

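/**
 * i40e_vc_enable_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to enable all or specific queue(s)
 **/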
static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id = vqs->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	/* Use the queue bit map sent by the VF */
	if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
				  true)) {
		aq_ret = I40E_ERR_TIMEOUT;
		goto error_param;
	}
	if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
				  true)) {
		aq_ret = I40E_ERR_TIMEOUT;
		goto error_param;
	}

	/* need to start the rings for additional ADq VSI's as well */
	if (vf->adq_enabled) {
		/* zero belongs to LAN VSI */
		for (i = 1; i < vf->num_tc; i++) {
			if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx]))
				aq_ret = I40E_ERR_TIMEOUT;
		}
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
				       aq_ret);
}

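/**
 * i40e_vc_disable_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to disable all or specific
 * queue(s)
 **/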
static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	/* Use the queue bit map sent by the VF */
	if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
				  false)) {
		aq_ret = I40E_ERR_TIMEOUT;
		goto error_param;
	}
	if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
				  false)) {
		aq_ret = I40E_ERR_TIMEOUT;
		goto error_param;
	}
error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
				       aq_ret);
}

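/**
 * i40e_vc_request_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * VFs get a default number of queues but can use this message to request a
 * different number. If the request is successful, PF will reset the VF and
 * return 0. If unsuccessful, PF will send message informing VF of number of
 * available queues and return result of sending VF a message.
 **/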
static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen)
{
	struct virtchnl_vf_res_request *vfres =
		(struct virtchnl_vf_res_request *)msg;
	int req_pairs = vfres->num_queue_pairs;
	int cur_pairs = vf->num_queue_pairs;
	struct i40e_pf *pf = vf->pf;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		return -EINVAL;

	if (req_pairs <= 0) {
		dev_err(&pf->pdev->dev,
			"VF %d tried to request %d queues. Ignoring.\n",
			vf->vf_id, req_pairs);
	} else if (req_pairs > I40E_MAX_VF_QUEUES) {
		dev_err(&pf->pdev->dev,
			"VF %d tried to request more than %d queues.\n",
			vf->vf_id,
			I40E_MAX_VF_QUEUES);
		vfres->num_queue_pairs = I40E_MAX_VF_QUEUES;
	} else if (req_pairs - cur_pairs > pf->queues_left) {
		dev_warn(&pf->pdev->dev,
			 "VF %d requested %d more queues, but only %d left.\n",
			 vf->vf_id,
			 req_pairs - cur_pairs,
			 pf->queues_left);
		vfres->num_queue_pairs = pf->queues_left + cur_pairs;
	} else {
		/* successful request */
		vf->num_req_queues = req_pairs;
		i40e_vc_notify_vf_reset(vf);
		i40e_reset_vf(vf, false);
		return 0;
	}

	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
				      (u8 *)vfres, sizeof(*vfres));
}

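/**
 * i40e_vc_get_stats_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to get vsi stats
 **/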
static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_eth_stats stats;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;

	memset(&stats, 0, sizeof(struct i40e_eth_stats));

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	i40e_update_eth_stats(vsi);
	stats = vsi->eth_stats;

error_param:
	/* send the response back to the VF */
	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
				      (u8 *)&stats, sizeof(stats));
}

/* If the VF is not trusted restrict the number of MAC/VLAN it can program */
#define I40E_VC_MAX_MAC_ADDR_PER_VF 12
#define I40E_VC_MAX_VLAN_PER_VF 8

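/**
 * i40e_check_vf_permission
 * @vf: pointer to the VF info
 * @al: MAC address list from virtchnl
 *
 * Check that the given list of MAC addresses is allowed. Will return -EPERM
 * if any address in the list is not valid. Checks the following conditions:
 *
 * 1) broadcast and zero addresses are never valid
 * 2) unicast addresses are not allowed if the VMM has administratively set
 *    the VF MAC address, unless the VF is marked as privileged
 * 3) if the VF is not privileged, then the VF is not allowed to add more than
 *    I40E_VC_MAX_MAC_ADDR_PER_VF addresses
 *
 * Note that to guarantee consistency, it is expected this function be called
 * while the mac_filter_hash_lock is held.
 **/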
static inline int i40e_check_vf_permission(struct i40e_vf *vf,
					   struct virtchnl_ether_addr_list *al)
{
	struct i40e_pf *pf = vf->pf;
	int i;

	/* If this VF is not privileged, then we can't add more than a limited
	 * number of addresses. Check to make sure that the additions do not
	 * push us over the limit.
	 */
	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
	    (vf->num_mac + al->num_elements) > I40E_VC_MAX_MAC_ADDR_PER_VF) {
		dev_err(&pf->pdev->dev,
			"Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
		return -EPERM;
	}

	for (i = 0; i < al->num_elements; i++) {
		u8 *addr = al->list[i].addr;

		if (is_broadcast_ether_addr(addr) ||
		    is_zero_ether_addr(addr)) {
			dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
				addr);
			return I40E_ERR_INVALID_MAC_ADDR;
		}

		/* If the host VMM administrator has set the VF MAC address
		 * administratively via the ndo_set_vf_mac command, then deny
		 * permission to the VF to add or delete unicast MAC addresses,
		 * unless the VF is privileged. The VF may request to set its
		 * own MAC address if it is trusted.
		 */
		if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
		    !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
		    !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
			dev_err(&pf->pdev->dev,
				"VF attempting to override administratively set MAC address, reload the VF driver to resume normal operation\n");
			return -EPERM;
		}
	}

	return 0;
}

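/**
 * i40e_vc_add_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * add guest mac address filter
 **/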
static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_ether_addr_list *al =
	    (struct virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = al->vsi_id;
	i40e_status ret = 0;
	int i;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		ret = I40E_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];

	/* Lock once, because all function inside for loop accesses VSI's
	 * MAC filter list which needs to be protected using same lock.
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	ret = i40e_check_vf_permission(vf, al);
	if (ret) {
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		goto error_param;
	}

	/* add new addresses to the list */
	for (i = 0; i < al->num_elements; i++) {
		struct i40e_mac_filter *f;

		f = i40e_find_mac(vsi, al->list[i].addr);
		if (!f) {
			f = i40e_add_mac_filter(vsi, al->list[i].addr);

			if (!f) {
				dev_err(&pf->pdev->dev,
					"Unable to add MAC filter %pM for VF %d\n",
					al->list[i].addr, vf->vf_id);
				ret = I40E_ERR_PARAM;
				spin_unlock_bh(&vsi->mac_filter_hash_lock);
				goto error_param;
			} else {
				vf->num_mac++;
			}
		}
	}
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* program the updated filter list */
	ret = i40e_sync_vsi_filters(vsi);
	if (ret)
		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
			vf->vf_id, ret);

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
				       ret);
}

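/**
 * i40e_vc_del_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * remove guest mac address filter
 **/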
2548static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2549{
2550 struct virtchnl_ether_addr_list *al =
2551 (struct virtchnl_ether_addr_list *)msg;
2552 struct i40e_pf *pf = vf->pf;
2553 struct i40e_vsi *vsi = NULL;
2554 u16 vsi_id = al->vsi_id;
2555 i40e_status ret = 0;
2556 int i;
2557
2558 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2559 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
2560 ret = I40E_ERR_PARAM;
2561 goto error_param;
2562 }
2563
2564 for (i = 0; i < al->num_elements; i++) {
2565 if (is_broadcast_ether_addr(al->list[i].addr) ||
2566 is_zero_ether_addr(al->list[i].addr)) {
2567 dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
2568 al->list[i].addr, vf->vf_id);
2569 ret = I40E_ERR_INVALID_MAC_ADDR;
2570 goto error_param;
2571 }
2572
2573 if (vf->pf_set_mac &&
2574 ether_addr_equal(al->list[i].addr,
2575 vf->default_lan_addr.addr)) {
2576 dev_err(&pf->pdev->dev,
2577 "MAC addr %pM has been set by PF, cannot delete it for VF %d, reset VF to change MAC addr\n",
2578 vf->default_lan_addr.addr, vf->vf_id);
2579 ret = I40E_ERR_PARAM;
2580 goto error_param;
2581 }
2582 }
2583 vsi = pf->vsi[vf->lan_vsi_idx];
2584
2585 spin_lock_bh(&vsi->mac_filter_hash_lock);
2586
	for (i = 0; i < al->num_elements; i++) {
		if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
			ret = I40E_ERR_INVALID_MAC_ADDR;
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_param;
		} else {
			vf->num_mac--;
		}
	}
2595
2596 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2597
2598
2599 ret = i40e_sync_vsi_filters(vsi);
2600 if (ret)
2601 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
2602 vf->vf_id, ret);
2603
2604error_param:
2605
2606 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
2607 ret);
2608}
2609
2610
2611
2612
2613
2614
2615
2616
2617
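/**
 * i40e_vc_add_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * add guest VLAN id
 **/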
2618static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2619{
2620 struct virtchnl_vlan_filter_list *vfl =
2621 (struct virtchnl_vlan_filter_list *)msg;
2622 struct i40e_pf *pf = vf->pf;
2623 struct i40e_vsi *vsi = NULL;
2624 u16 vsi_id = vfl->vsi_id;
2625 i40e_status aq_ret = 0;
2626 int i;
2627
	if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
		dev_err(&pf->pdev->dev,
			"VF is not trusted, switch the VF to trusted to add more VLANs\n");
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
2634 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2635 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
2636 aq_ret = I40E_ERR_PARAM;
2637 goto error_param;
2638 }
2639
2640 for (i = 0; i < vfl->num_elements; i++) {
2641 if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
2642 aq_ret = I40E_ERR_PARAM;
2643 dev_err(&pf->pdev->dev,
2644 "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
2645 goto error_param;
2646 }
2647 }
2648 vsi = pf->vsi[vf->lan_vsi_idx];
2649 if (vsi->info.pvid) {
2650 aq_ret = I40E_ERR_PARAM;
2651 goto error_param;
2652 }
2653
2654 i40e_vlan_stripping_enable(vsi);
2655 for (i = 0; i < vfl->num_elements; i++) {
2656
2657 int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
2658 if (!ret)
2659 vf->num_vlan++;
2660
2661 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
2662 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
2663 true,
2664 vfl->vlan_id[i],
2665 NULL);
2666 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
2667 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
2668 true,
2669 vfl->vlan_id[i],
2670 NULL);
2671
2672 if (ret)
2673 dev_err(&pf->pdev->dev,
2674 "Unable to add VLAN filter %d for VF %d, error %d\n",
2675 vfl->vlan_id[i], vf->vf_id, ret);
2676 }
2677
2678error_param:
2679
2680 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
2681}
2682
2683
2684
2685
2686
2687
2688
2689
2690
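/**
 * i40e_vc_remove_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * remove programmed guest VLAN id
 **/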
2691static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2692{
2693 struct virtchnl_vlan_filter_list *vfl =
2694 (struct virtchnl_vlan_filter_list *)msg;
2695 struct i40e_pf *pf = vf->pf;
2696 struct i40e_vsi *vsi = NULL;
2697 u16 vsi_id = vfl->vsi_id;
2698 i40e_status aq_ret = 0;
2699 int i;
2700
2701 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2702 !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
2703 aq_ret = I40E_ERR_PARAM;
2704 goto error_param;
2705 }
2706
2707 for (i = 0; i < vfl->num_elements; i++) {
2708 if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
2709 aq_ret = I40E_ERR_PARAM;
2710 goto error_param;
2711 }
2712 }
2713
2714 vsi = pf->vsi[vf->lan_vsi_idx];
2715 if (vsi->info.pvid) {
2716 aq_ret = I40E_ERR_PARAM;
2717 goto error_param;
2718 }
2719
2720 for (i = 0; i < vfl->num_elements; i++) {
2721 i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
2722 vf->num_vlan--;
2723
2724 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
2725 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
2726 false,
2727 vfl->vlan_id[i],
2728 NULL);
2729 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
2730 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
2731 false,
2732 vfl->vlan_id[i],
2733 NULL);
2734 }
2735
2736error_param:
2737
2738 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
2739}
2740
2741
2742
2743
2744
2745
2746
2747
2748
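/**
 * i40e_vc_iwarp_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF for the iwarp msgs
 **/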
2749static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2750{
2751 struct i40e_pf *pf = vf->pf;
2752 int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
2753 i40e_status aq_ret = 0;
2754
2755 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2756 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
2757 aq_ret = I40E_ERR_PARAM;
2758 goto error_param;
2759 }
2760
2761 i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
2762 msg, msglen);
2763
2764error_param:
2765
2766 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP,
2767 aq_ret);
2768}
2769
2770
2771
2772
2773
2774
2775
2776
2777
2778
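/**
 * i40e_vc_iwarp_qvmap_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 * @config: config qvmap or release it
 *
 * called from the VF for the iwarp irq map request
 **/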
2779static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen,
2780 bool config)
2781{
2782 struct virtchnl_iwarp_qvlist_info *qvlist_info =
2783 (struct virtchnl_iwarp_qvlist_info *)msg;
2784 i40e_status aq_ret = 0;
2785
2786 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2787 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
2788 aq_ret = I40E_ERR_PARAM;
2789 goto error_param;
2790 }
2791
2792 if (config) {
2793 if (i40e_config_iwarp_qvlist(vf, qvlist_info))
2794 aq_ret = I40E_ERR_PARAM;
2795 } else {
2796 i40e_release_iwarp_qvlist(vf);
2797 }
2798
2799error_param:
2800
2801 return i40e_vc_send_resp_to_vf(vf,
2802 config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
2803 VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
2804 aq_ret);
2805}
2806
2807
2808
2809
2810
2811
2812
2813
2814
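/**
 * i40e_vc_config_rss_key
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * Configure the VF's RSS key
 **/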
2815static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen)
2816{
2817 struct virtchnl_rss_key *vrk =
2818 (struct virtchnl_rss_key *)msg;
2819 struct i40e_pf *pf = vf->pf;
2820 struct i40e_vsi *vsi = NULL;
2821 u16 vsi_id = vrk->vsi_id;
2822 i40e_status aq_ret = 0;
2823
2824 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2825 !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
2826 (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
2827 aq_ret = I40E_ERR_PARAM;
2828 goto err;
2829 }
2830
2831 vsi = pf->vsi[vf->lan_vsi_idx];
2832 aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
2833err:
2834
2835 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
2836 aq_ret);
2837}
2838
2839
2840
2841
2842
2843
2844
2845
2846
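/**
 * i40e_vc_config_rss_lut
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * Configure the VF's RSS LUT
 **/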
2847static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen)
2848{
2849 struct virtchnl_rss_lut *vrl =
2850 (struct virtchnl_rss_lut *)msg;
2851 struct i40e_pf *pf = vf->pf;
2852 struct i40e_vsi *vsi = NULL;
2853 u16 vsi_id = vrl->vsi_id;
2854 i40e_status aq_ret = 0;
2855
2856 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2857 !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
2858 (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
2859 aq_ret = I40E_ERR_PARAM;
2860 goto err;
2861 }
2862
2863 vsi = pf->vsi[vf->lan_vsi_idx];
2864 aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
2865
2866err:
2867 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
2868 aq_ret);
2869}
2870
2871
2872
2873
2874
2875
2876
2877
2878
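/**
 * i40e_vc_get_rss_hena
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * Return the RSS HENA bits allowed by the hardware
 **/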
2879static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
2880{
2881 struct virtchnl_rss_hena *vrh = NULL;
2882 struct i40e_pf *pf = vf->pf;
2883 i40e_status aq_ret = 0;
2884 int len = 0;
2885
2886 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2887 aq_ret = I40E_ERR_PARAM;
2888 goto err;
2889 }
2890 len = sizeof(struct virtchnl_rss_hena);
2891
2892 vrh = kzalloc(len, GFP_KERNEL);
2893 if (!vrh) {
2894 aq_ret = I40E_ERR_NO_MEMORY;
2895 len = 0;
2896 goto err;
2897 }
2898 vrh->hena = i40e_pf_get_default_rss_hena(pf);
2899err:
2900
2901 aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
2902 aq_ret, (u8 *)vrh, len);
2903 kfree(vrh);
2904 return aq_ret;
2905}
2906
2907
2908
2909
2910
2911
2912
2913
2914
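/**
 * i40e_vc_set_rss_hena
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * Set the RSS HENA bits for the VF
 **/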
2915static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
2916{
2917 struct virtchnl_rss_hena *vrh =
2918 (struct virtchnl_rss_hena *)msg;
2919 struct i40e_pf *pf = vf->pf;
2920 struct i40e_hw *hw = &pf->hw;
2921 i40e_status aq_ret = 0;
2922
2923 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2924 aq_ret = I40E_ERR_PARAM;
2925 goto err;
2926 }
2927 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
2928 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
2929 (u32)(vrh->hena >> 32));
2930
2931
2932err:
2933 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
2934}
2935
2936
2937
2938
2939
2940
2941
2942
2943
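/**
 * i40e_vc_enable_vlan_stripping
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * Enable VLAN header stripping for the VF
 **/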
2944static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg,
2945 u16 msglen)
2946{
2947 struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
2948 i40e_status aq_ret = 0;
2949
2950 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2951 aq_ret = I40E_ERR_PARAM;
2952 goto err;
2953 }
2954
2955 i40e_vlan_stripping_enable(vsi);
2956
2957
2958err:
2959 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
2960 aq_ret);
2961}
2962
2963
2964
2965
2966
2967
2968
2969
2970
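/**
 * i40e_vc_disable_vlan_stripping
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * Disable VLAN header stripping for the VF
 **/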
2971static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg,
2972 u16 msglen)
2973{
2974 struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
2975 i40e_status aq_ret = 0;
2976
2977 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2978 aq_ret = I40E_ERR_PARAM;
2979 goto err;
2980 }
2981
2982 i40e_vlan_stripping_disable(vsi);
2983
2984
2985err:
2986 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
2987 aq_ret);
2988}
2989
2990
2991
2992
2993
2994
2995
2996
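/**
 * i40e_validate_cloud_filter
 * @vf: pointer to the VF info
 * @tc_filter: pointer to the virtchnl filter
 *
 * This function validates a cloud filter programmed as a TC filter for ADq
 **/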
2997static int i40e_validate_cloud_filter(struct i40e_vf *vf,
2998 struct virtchnl_filter *tc_filter)
2999{
3000 struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec;
3001 struct virtchnl_l4_spec data = tc_filter->data.tcp_spec;
3002 struct i40e_pf *pf = vf->pf;
3003 struct i40e_vsi *vsi = NULL;
3004 struct i40e_mac_filter *f;
3005 struct hlist_node *h;
3006 bool found = false;
3007 int bkt;
3008
3009 if (!tc_filter->action) {
3010 dev_info(&pf->pdev->dev,
3011 "VF %d: Currently ADq doesn't support Drop Action\n",
3012 vf->vf_id);
3013 goto err;
3014 }
3015
3016
3017 if (!tc_filter->action_meta ||
3018 tc_filter->action_meta > I40E_MAX_VF_VSI) {
3019 dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
3020 vf->vf_id, tc_filter->action_meta);
3021 goto err;
3022 }
3023
3024
3025
3026
3027
3028
3029
3030
3031
3032
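	/* Cloud filters that match on the VF's own destination MAC (and,
	 * optionally, VLAN) are allowed for any VF; filters on other fields
	 * are an advanced mode that requires the VF to be trusted.
	 */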
3033 if (mask.dst_mac[0] && !mask.dst_ip[0]) {
3034 vsi = pf->vsi[vf->lan_vsi_idx];
3035 f = i40e_find_mac(vsi, data.dst_mac);
3036
3037 if (!f) {
3038 dev_info(&pf->pdev->dev,
3039 "Destination MAC %pM doesn't belong to VF %d\n",
3040 data.dst_mac, vf->vf_id);
3041 goto err;
3042 }
3043
3044 if (mask.vlan_id) {
3045 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f,
3046 hlist) {
3047 if (f->vlan == ntohs(data.vlan_id)) {
3048 found = true;
3049 break;
3050 }
3051 }
3052 if (!found) {
3053 dev_info(&pf->pdev->dev,
3054 "VF %d doesn't have any VLAN id %u\n",
3055 vf->vf_id, ntohs(data.vlan_id));
3056 goto err;
3057 }
3058 }
3059 } else {
3060
3061 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
3062 dev_err(&pf->pdev->dev,
3063 "VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n",
3064 vf->vf_id);
3065 return I40E_ERR_CONFIG;
3066 }
3067 }
3068
3069 if (mask.dst_mac[0] & data.dst_mac[0]) {
3070 if (is_broadcast_ether_addr(data.dst_mac) ||
3071 is_zero_ether_addr(data.dst_mac)) {
3072 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n",
3073 vf->vf_id, data.dst_mac);
3074 goto err;
3075 }
3076 }
3077
3078 if (mask.src_mac[0] & data.src_mac[0]) {
3079 if (is_broadcast_ether_addr(data.src_mac) ||
3080 is_zero_ether_addr(data.src_mac)) {
3081 dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n",
3082 vf->vf_id, data.src_mac);
3083 goto err;
3084 }
3085 }
3086
	if (mask.dst_port & data.dst_port) {
		if (!data.dst_port) {
			dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
				 vf->vf_id);
			goto err;
		}
	}

	if (mask.src_port & data.src_port) {
		if (!data.src_port) {
			dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
				 vf->vf_id);
			goto err;
		}
	}
3102
3103 if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW &&
3104 tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) {
3105 dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n",
3106 vf->vf_id);
3107 goto err;
3108 }
3109
3110 if (mask.vlan_id & data.vlan_id) {
3111 if (ntohs(data.vlan_id) > I40E_MAX_VLANID) {
3112 dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n",
3113 vf->vf_id);
3114 goto err;
3115 }
3116 }
3117
3118 return I40E_SUCCESS;
3119err:
3120 return I40E_ERR_CONFIG;
3121}
3122
3123
3124
3125
3126
3127
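/**
 * i40e_find_vsi_from_seid - searches for the VSI with the given seid
 * @vf: pointer to the VF info
 * @seid: seid of the VSI it is searching for
 **/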
3128static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid)
3129{
3130 struct i40e_pf *pf = vf->pf;
3131 struct i40e_vsi *vsi = NULL;
3132 int i;
3133
	for (i = 0; i < vf->num_tc; i++) {
3135 vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id);
3136 if (vsi && vsi->seid == seid)
3137 return vsi;
3138 }
3139 return NULL;
3140}
3141
3142
3143
3144
3145
3146
3147
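/**
 * i40e_del_all_cloud_filters - delete all cloud filters on the VF
 * @vf: pointer to the VF info
 **/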
3148static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
3149{
3150 struct i40e_cloud_filter *cfilter = NULL;
3151 struct i40e_pf *pf = vf->pf;
3152 struct i40e_vsi *vsi = NULL;
3153 struct hlist_node *node;
3154 int ret;
3155
3156 hlist_for_each_entry_safe(cfilter, node,
3157 &vf->cloud_filter_list, cloud_node) {
3158 vsi = i40e_find_vsi_from_seid(vf, cfilter->seid);
3159
3160 if (!vsi) {
3161 dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n",
3162 vf->vf_id, cfilter->seid);
3163 continue;
3164 }
3165
3166 if (cfilter->dst_port)
3167 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
3168 false);
3169 else
3170 ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
3171 if (ret)
3172 dev_err(&pf->pdev->dev,
3173 "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
3174 vf->vf_id, i40e_stat_str(&pf->hw, ret),
3175 i40e_aq_str(&pf->hw,
3176 pf->hw.aq.asq_last_status));
3177
3178 hlist_del(&cfilter->cloud_node);
3179 kfree(cfilter);
3180 vf->num_cloud_filters--;
3181 }
3182}
3183
3184
3185
3186
3187
3188
3189
3190
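/**
 * i40e_vc_del_cloud_filter
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * This function deletes a cloud filter programmed as a TC filter for ADq
 **/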
3191static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
3192{
3193 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3194 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3195 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3196 struct i40e_cloud_filter cfilter, *cf = NULL;
3197 struct i40e_pf *pf = vf->pf;
3198 struct i40e_vsi *vsi = NULL;
3199 struct hlist_node *node;
3200 i40e_status aq_ret = 0;
3201 int i, ret;
3202
3203 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3204 aq_ret = I40E_ERR_PARAM;
3205 goto err;
3206 }
3207
3208 if (!vf->adq_enabled) {
3209 dev_info(&pf->pdev->dev,
3210 "VF %d: ADq not enabled, can't apply cloud filter\n",
3211 vf->vf_id);
3212 aq_ret = I40E_ERR_PARAM;
3213 goto err;
3214 }
3215
3216 if (i40e_validate_cloud_filter(vf, vcf)) {
3217 dev_info(&pf->pdev->dev,
3218 "VF %d: Invalid input, can't apply cloud filter\n",
3219 vf->vf_id);
3220 aq_ret = I40E_ERR_PARAM;
3221 goto err;
3222 }
3223
3224 memset(&cfilter, 0, sizeof(cfilter));
3225
3226 for (i = 0; i < ETH_ALEN; i++)
3227 cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3228
3229
3230 for (i = 0; i < ETH_ALEN; i++)
3231 cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3232
3233 cfilter.vlan_id = mask.vlan_id & tcf.vlan_id;
3234 cfilter.dst_port = mask.dst_port & tcf.dst_port;
3235 cfilter.src_port = mask.src_port & tcf.src_port;
3236
3237 switch (vcf->flow_type) {
3238 case VIRTCHNL_TCP_V4_FLOW:
3239 cfilter.n_proto = ETH_P_IP;
		if (mask.dst_ip[0] & tcf.dst_ip[0])
			memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
			       sizeof(cfilter.ip.v4.dst_ip));
		else if (mask.src_ip[0] & tcf.src_ip[0])
			memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
			       sizeof(cfilter.ip.v4.src_ip));
3246 break;
3247 case VIRTCHNL_TCP_V6_FLOW:
3248 cfilter.n_proto = ETH_P_IPV6;
3249 if (mask.dst_ip[3] & tcf.dst_ip[3])
3250 memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip,
3251 sizeof(cfilter.ip.v6.dst_ip6));
3252 if (mask.src_ip[3] & tcf.src_ip[3])
3253 memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip,
3254 sizeof(cfilter.ip.v6.src_ip6));
3255 break;
3256 default:
3257
3258
3259
3260 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3261 vf->vf_id);
3262 }
3263
3264
3265 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3266 cfilter.seid = vsi->seid;
3267 cfilter.flags = vcf->field_flags;
3268
3269
3270 if (tcf.dst_port)
3271 ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false);
3272 else
3273 ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
3274 if (ret) {
3275 dev_err(&pf->pdev->dev,
3276 "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
3277 vf->vf_id, i40e_stat_str(&pf->hw, ret),
3278 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3279 goto err;
3280 }
3281
3282 hlist_for_each_entry_safe(cf, node,
3283 &vf->cloud_filter_list, cloud_node) {
3284 if (cf->seid != cfilter.seid)
3285 continue;
3286 if (mask.dst_port)
3287 if (cfilter.dst_port != cf->dst_port)
3288 continue;
		if (mask.dst_mac[0])
			if (!ether_addr_equal(cf->dst_mac, cfilter.dst_mac))
				continue;

		if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0])
			if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip,
				   sizeof(cfilter.ip.v4.dst_ip)))
				continue;

		if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
			if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
				   sizeof(cfilter.ip.v6.dst_ip6)))
				continue;
3302 if (mask.vlan_id)
3303 if (cfilter.vlan_id != cf->vlan_id)
3304 continue;
3305
3306 hlist_del(&cf->cloud_node);
3307 kfree(cf);
3308 vf->num_cloud_filters--;
3309 }
3310
3311err:
3312 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER,
3313 aq_ret);
3314}
3315
3316
3317
3318
3319
3320
3321
3322
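/**
 * i40e_vc_add_cloud_filter
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * This function adds a cloud filter programmed as a TC filter for ADq
 **/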
3323static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
3324{
3325 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3326 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3327 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3328 struct i40e_cloud_filter *cfilter = NULL;
3329 struct i40e_pf *pf = vf->pf;
3330 struct i40e_vsi *vsi = NULL;
3331 i40e_status aq_ret = 0;
3332 int i, ret;
3333
3334 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3335 aq_ret = I40E_ERR_PARAM;
3336 goto err;
3337 }
3338
3339 if (!vf->adq_enabled) {
3340 dev_info(&pf->pdev->dev,
3341 "VF %d: ADq is not enabled, can't apply cloud filter\n",
3342 vf->vf_id);
3343 aq_ret = I40E_ERR_PARAM;
3344 goto err;
3345 }
3346
3347 if (i40e_validate_cloud_filter(vf, vcf)) {
3348 dev_info(&pf->pdev->dev,
3349 "VF %d: Invalid input/s, can't apply cloud filter\n",
3350 vf->vf_id);
3351 aq_ret = I40E_ERR_PARAM;
3352 goto err;
3353 }
3354
3355 cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
3356 if (!cfilter)
3357 return -ENOMEM;
3358
3359
3360 for (i = 0; i < ETH_ALEN; i++)
3361 cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3362
3363
3364 for (i = 0; i < ETH_ALEN; i++)
3365 cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3366
3367 cfilter->vlan_id = mask.vlan_id & tcf.vlan_id;
3368 cfilter->dst_port = mask.dst_port & tcf.dst_port;
3369 cfilter->src_port = mask.src_port & tcf.src_port;
3370
3371 switch (vcf->flow_type) {
3372 case VIRTCHNL_TCP_V4_FLOW:
3373 cfilter->n_proto = ETH_P_IP;
		if (mask.dst_ip[0] & tcf.dst_ip[0])
			memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
			       sizeof(cfilter->ip.v4.dst_ip));
		else if (mask.src_ip[0] & tcf.src_ip[0])
			memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
			       sizeof(cfilter->ip.v4.src_ip));
3380 break;
3381 case VIRTCHNL_TCP_V6_FLOW:
3382 cfilter->n_proto = ETH_P_IPV6;
3383 if (mask.dst_ip[3] & tcf.dst_ip[3])
3384 memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip,
3385 sizeof(cfilter->ip.v6.dst_ip6));
3386 if (mask.src_ip[3] & tcf.src_ip[3])
3387 memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip,
3388 sizeof(cfilter->ip.v6.src_ip6));
3389 break;
3390 default:
3391
3392
3393
3394 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3395 vf->vf_id);
3396 }
3397
3398
3399 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3400 cfilter->seid = vsi->seid;
3401 cfilter->flags = vcf->field_flags;
3402
3403
3404 if (tcf.dst_port)
3405 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
3406 else
3407 ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
3408 if (ret) {
3409 dev_err(&pf->pdev->dev,
3410 "VF %d: Failed to add cloud filter, err %s aq_err %s\n",
3411 vf->vf_id, i40e_stat_str(&pf->hw, ret),
3412 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3413 goto err;
3414 }
3415
3416 INIT_HLIST_NODE(&cfilter->cloud_node);
3417 hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
3418 vf->num_cloud_filters++;
3419err:
3420 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
3421 aq_ret);
3422}
3423
3424
3425
3426
3427
3428
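/**
 * i40e_vc_add_qch_msg - enable queue channels (ADq) for the VF
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 **/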
3429static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
3430{
3431 struct virtchnl_tc_info *tci =
3432 (struct virtchnl_tc_info *)msg;
3433 struct i40e_pf *pf = vf->pf;
3434 struct i40e_link_status *ls = &pf->hw.phy.link_info;
3435 int i, adq_request_qps = 0, speed = 0;
3436 i40e_status aq_ret = 0;
3437
3438 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3439 aq_ret = I40E_ERR_PARAM;
3440 goto err;
3441 }
3442
3443
3444 if (vf->spoofchk) {
3445 dev_err(&pf->pdev->dev,
3446 "Spoof check is ON, turn it OFF to enable ADq\n");
3447 aq_ret = I40E_ERR_PARAM;
3448 goto err;
3449 }
3450
3451 if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) {
3452 dev_err(&pf->pdev->dev,
3453 "VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
3454 vf->vf_id);
3455 aq_ret = I40E_ERR_PARAM;
3456 goto err;
3457 }
3458
3459
3460 if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) {
3461 dev_err(&pf->pdev->dev,
3462 "VF %d trying to set %u TCs, valid range 1-4 TCs per VF\n",
3463 vf->vf_id, tci->num_tc);
3464 aq_ret = I40E_ERR_PARAM;
3465 goto err;
3466 }
3467
3468
3469 for (i = 0; i < tci->num_tc; i++)
3470 if (!tci->list[i].count ||
3471 tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
3472 dev_err(&pf->pdev->dev,
3473 "VF %d: TC %d trying to set %u queues, valid range 1-4 queues per TC\n",
3474 vf->vf_id, i, tci->list[i].count);
3475 aq_ret = I40E_ERR_PARAM;
3476 goto err;
3477 }
3478
3479
3480 adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF;
3481
3482 if (pf->queues_left < adq_request_qps) {
3483 dev_err(&pf->pdev->dev,
3484 "No queues left to allocate to VF %d\n",
3485 vf->vf_id);
3486 aq_ret = I40E_ERR_PARAM;
3487 goto err;
3488 } else {
3489
3490
3491
3492
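		/* Allocate the maximum number of VF queues when enabling
		 * ADq, so an ADq-enabled VF always gets its queues back
		 * after a reset.
		 */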
3493 vf->num_queue_pairs = I40E_MAX_VF_QUEUES;
3494 }
3495
3496
3497 switch (ls->link_speed) {
3498 case VIRTCHNL_LINK_SPEED_100MB:
3499 speed = SPEED_100;
3500 break;
3501 case VIRTCHNL_LINK_SPEED_1GB:
3502 speed = SPEED_1000;
3503 break;
3504 case VIRTCHNL_LINK_SPEED_10GB:
3505 speed = SPEED_10000;
3506 break;
3507 case VIRTCHNL_LINK_SPEED_20GB:
3508 speed = SPEED_20000;
3509 break;
3510 case VIRTCHNL_LINK_SPEED_25GB:
3511 speed = SPEED_25000;
3512 break;
3513 case VIRTCHNL_LINK_SPEED_40GB:
3514 speed = SPEED_40000;
3515 break;
3516 default:
3517 dev_err(&pf->pdev->dev,
3518 "Cannot detect link speed\n");
3519 aq_ret = I40E_ERR_PARAM;
3520 goto err;
3521 }
3522
3523
3524 vf->num_tc = tci->num_tc;
3525 for (i = 0; i < vf->num_tc; i++) {
3526 if (tci->list[i].max_tx_rate) {
3527 if (tci->list[i].max_tx_rate > speed) {
3528 dev_err(&pf->pdev->dev,
3529 "Invalid max tx rate %llu specified for VF %d.",
3530 tci->list[i].max_tx_rate,
3531 vf->vf_id);
3532 aq_ret = I40E_ERR_PARAM;
3533 goto err;
3534 } else {
3535 vf->ch[i].max_tx_rate =
3536 tci->list[i].max_tx_rate;
3537 }
3538 }
3539 vf->ch[i].num_qps = tci->list[i].count;
3540 }
3541
3542
3543 vf->adq_enabled = true;
3544
3545
3546
3547
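	/* num_req_queues is set when the user changes the number of queues
	 * via ethtool, which conflicts with the default VSI sizing once ADq
	 * is enabled, so clear it here.
	 */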
3548 vf->num_req_queues = 0;
3549
3550
3551 i40e_vc_notify_vf_reset(vf);
3552 i40e_reset_vf(vf, false);
3553
3554 return I40E_SUCCESS;
3555
3556
3557err:
3558 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS,
3559 aq_ret);
3560}
3561
3562
3563
3564
3565
3566
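/**
 * i40e_vc_del_qch_msg - disable queue channels (ADq) for the VF
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 **/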
3567static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
3568{
3569 struct i40e_pf *pf = vf->pf;
3570 i40e_status aq_ret = 0;
3571
3572 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3573 aq_ret = I40E_ERR_PARAM;
3574 goto err;
3575 }
3576
3577 if (vf->adq_enabled) {
3578 i40e_del_all_cloud_filters(vf);
3579 i40e_del_qch(vf);
3580 vf->adq_enabled = false;
3581 vf->num_tc = 0;
3582 dev_info(&pf->pdev->dev,
3583 "Deleting Queue Channels and cloud filters for ADq on VF %d\n",
3584 vf->vf_id);
3585 } else {
3586 dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
3587 vf->vf_id);
3588 aq_ret = I40E_ERR_PARAM;
3589 }
3590
3591
3592 i40e_vc_notify_vf_reset(vf);
3593 i40e_reset_vf(vf, false);
3594
3595 return I40E_SUCCESS;
3596
3597err:
3598 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
3599 aq_ret);
3600}
3601
3602
3603
3604
3605
3606
3607
3608
3609
3610
3611
3612
3613
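/**
 * i40e_vc_process_vf_msg
 * @pf: pointer to the PF structure
 * @vf_id: source VF id
 * @v_opcode: operation code
 * @v_retval: unused return value code
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the common AEQ/ARQ handler to
 * process request from VF
 **/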
3614int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
3615 u32 __always_unused v_retval, u8 *msg, u16 msglen)
3616{
3617 struct i40e_hw *hw = &pf->hw;
3618 int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
3619 struct i40e_vf *vf;
3620 int ret;
3621
3622 pf->vf_aq_requests++;
	if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs)
		return -EINVAL;
3625 vf = &(pf->vf[local_vf_id]);
3626
3627
3628 if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
3629 return I40E_ERR_PARAM;
3630
3631
3632 ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
3633
3634
3635 if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) {
3636 struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg;
3637
3638 if (vrk->key_len != I40E_HKEY_ARRAY_SIZE)
3639 ret = -EINVAL;
3640 } else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) {
3641 struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
3642
3643 if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)
3644 ret = -EINVAL;
3645 }
3646
3647 if (ret) {
3648 i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
3649 dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
3650 local_vf_id, v_opcode, msglen);
3651 switch (ret) {
3652 case VIRTCHNL_ERR_PARAM:
3653 return -EPERM;
3654 default:
3655 return -EINVAL;
3656 }
3657 }
3658
3659 switch (v_opcode) {
3660 case VIRTCHNL_OP_VERSION:
3661 ret = i40e_vc_get_version_msg(vf, msg);
3662 break;
3663 case VIRTCHNL_OP_GET_VF_RESOURCES:
3664 ret = i40e_vc_get_vf_resources_msg(vf, msg);
3665 i40e_vc_notify_vf_link_state(vf);
3666 break;
3667 case VIRTCHNL_OP_RESET_VF:
3668 i40e_vc_reset_vf_msg(vf);
3669 ret = 0;
3670 break;
3671 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
3672 ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
3673 break;
3674 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
3675 ret = i40e_vc_config_queues_msg(vf, msg, msglen);
3676 break;
3677 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
3678 ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
3679 break;
3680 case VIRTCHNL_OP_ENABLE_QUEUES:
3681 ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
3682 i40e_vc_notify_vf_link_state(vf);
3683 break;
3684 case VIRTCHNL_OP_DISABLE_QUEUES:
3685 ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
3686 break;
3687 case VIRTCHNL_OP_ADD_ETH_ADDR:
3688 ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
3689 break;
3690 case VIRTCHNL_OP_DEL_ETH_ADDR:
3691 ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
3692 break;
3693 case VIRTCHNL_OP_ADD_VLAN:
3694 ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
3695 break;
3696 case VIRTCHNL_OP_DEL_VLAN:
3697 ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
3698 break;
3699 case VIRTCHNL_OP_GET_STATS:
3700 ret = i40e_vc_get_stats_msg(vf, msg, msglen);
3701 break;
3702 case VIRTCHNL_OP_IWARP:
3703 ret = i40e_vc_iwarp_msg(vf, msg, msglen);
3704 break;
3705 case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
3706 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, true);
3707 break;
3708 case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
3709 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false);
3710 break;
3711 case VIRTCHNL_OP_CONFIG_RSS_KEY:
3712 ret = i40e_vc_config_rss_key(vf, msg, msglen);
3713 break;
3714 case VIRTCHNL_OP_CONFIG_RSS_LUT:
3715 ret = i40e_vc_config_rss_lut(vf, msg, msglen);
3716 break;
3717 case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
3718 ret = i40e_vc_get_rss_hena(vf, msg, msglen);
3719 break;
3720 case VIRTCHNL_OP_SET_RSS_HENA:
3721 ret = i40e_vc_set_rss_hena(vf, msg, msglen);
3722 break;
3723 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
3724 ret = i40e_vc_enable_vlan_stripping(vf, msg, msglen);
3725 break;
3726 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
3727 ret = i40e_vc_disable_vlan_stripping(vf, msg, msglen);
3728 break;
3729 case VIRTCHNL_OP_REQUEST_QUEUES:
3730 ret = i40e_vc_request_queues_msg(vf, msg, msglen);
3731 break;
3732 case VIRTCHNL_OP_ENABLE_CHANNELS:
3733 ret = i40e_vc_add_qch_msg(vf, msg);
3734 break;
3735 case VIRTCHNL_OP_DISABLE_CHANNELS:
3736 ret = i40e_vc_del_qch_msg(vf, msg);
3737 break;
3738 case VIRTCHNL_OP_ADD_CLOUD_FILTER:
3739 ret = i40e_vc_add_cloud_filter(vf, msg);
3740 break;
3741 case VIRTCHNL_OP_DEL_CLOUD_FILTER:
3742 ret = i40e_vc_del_cloud_filter(vf, msg);
3743 break;
3744 case VIRTCHNL_OP_UNKNOWN:
3745 default:
3746 dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
3747 v_opcode, local_vf_id);
3748 ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
3749 I40E_ERR_NOT_IMPLEMENTED);
3750 break;
3751 }
3752
3753 return ret;
3754}
3755
3756
3757
3758
3759
3760
3761
3762
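/**
 * i40e_vc_process_vflr_event
 * @pf: pointer to the PF structure
 *
 * called from the VFLR IRQ handler to
 * free up VF resources and state variables
 **/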
3763int i40e_vc_process_vflr_event(struct i40e_pf *pf)
3764{
3765 struct i40e_hw *hw = &pf->hw;
3766 u32 reg, reg_idx, bit_idx;
3767 struct i40e_vf *vf;
3768 int vf_id;
3769
3770 if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
3771 return 0;
3772
3773
3774
3775
3776
3777
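	/* Re-enable the VFLR interrupt cause here, before looking for which
	 * VF got reset. Otherwise a new VFLR event arriving while we are
	 * handling this one would not retrigger the interrupt and could be
	 * missed until the next reset.
	 */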
3778 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3779 reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
3780 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3781 i40e_flush(hw);
3782
3783 clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
3784 for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
3785 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
3786 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
3787
3788 vf = &pf->vf[vf_id];
3789 reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
3790 if (reg & BIT(bit_idx))
3791
3792 i40e_reset_vf(vf, true);
3793 }
3794
3795 return 0;
3796}
3797
3798
3799
3800
3801
3802
3803
3804
3805
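/**
 * i40e_ndo_set_vf_mac
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @mac: MAC address
 *
 * program VF MAC address
 **/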
3806int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
3807{
3808 struct i40e_netdev_priv *np = netdev_priv(netdev);
3809 struct i40e_vsi *vsi = np->vsi;
3810 struct i40e_pf *pf = vsi->back;
3811 struct i40e_mac_filter *f;
3812 struct i40e_vf *vf;
3813 int ret = 0;
3814 struct hlist_node *h;
3815 int bkt;
3816 u8 i;
3817
3818
3819 if (vf_id >= pf->num_alloc_vfs) {
3820 dev_err(&pf->pdev->dev,
3821 "Invalid VF Identifier %d\n", vf_id);
3822 ret = -EINVAL;
3823 goto error_param;
3824 }
3825
3826 vf = &(pf->vf[vf_id]);
3827 vsi = pf->vsi[vf->lan_vsi_idx];
3828
3829
3830
3831
3832
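	/* When the VF is resetting, wait until it is done. A reset can take
	 * up to 200 milliseconds, so poll for up to 300 milliseconds to be
	 * safe.
	 */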
3833 for (i = 0; i < 15; i++) {
3834 if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
3835 break;
3836 msleep(20);
3837 }
3838 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
3839 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
3840 vf_id);
3841 ret = -EAGAIN;
3842 goto error_param;
3843 }
3844
3845 if (is_multicast_ether_addr(mac)) {
3846 dev_err(&pf->pdev->dev,
3847 "Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
3848 ret = -EINVAL;
3849 goto error_param;
3850 }
3851
3852
3853
3854
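	/* Lock once; the add/delete filter helpers invoked below require
	 * the mac_filter_hash_lock to be held.
	 */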
3855 spin_lock_bh(&vsi->mac_filter_hash_lock);
3856
3857
3858 if (!is_zero_ether_addr(vf->default_lan_addr.addr))
3859 i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
3860
3861
3862
3863
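	/* Delete all the filters for this VSI - we're going to kill it
	 * anyway.
	 */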
3864 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
3865 __i40e_del_filter(vsi, f);
3866
3867 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3868
3869
3870 if (i40e_sync_vsi_filters(vsi)) {
3871 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
3872 ret = -EIO;
3873 goto error_param;
3874 }
3875 ether_addr_copy(vf->default_lan_addr.addr, mac);
3876
3877 if (is_zero_ether_addr(mac)) {
3878 vf->pf_set_mac = false;
3879 dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
3880 } else {
3881 vf->pf_set_mac = true;
3882 dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
3883 mac, vf_id);
3884 }
3885
3886
3887 i40e_vc_disable_vf(vf);
3888 dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
3889
3890error_param:
3891 return ret;
3892}
3893
3894
3895
3896
3897
3898
3899
3900
3901
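/**
 * i40e_vsi_has_vlans - true if VSI has configured VLANs
 * @vsi: pointer to the VSI
 *
 * Check if a VSI has configured any VLANs. False if we have a port VLAN or
 * if we have no configured VLANs.
 **/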
3902static bool i40e_vsi_has_vlans(struct i40e_vsi *vsi)
3903{
3904 bool have_vlans;
3905
3906
3907
3908
3909 if (vsi->info.pvid)
3910 return false;
3911
3912
3913
3914
3915 spin_lock_bh(&vsi->mac_filter_hash_lock);
3916 have_vlans = i40e_is_vsi_in_vlan(vsi);
3917 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3918
3919 return have_vlans;
3920}
3921
3922
3923
3924
3925
3926
3927
3928
3929
3930
3931
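/**
 * i40e_ndo_set_vf_port_vlan
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @vlan_id: VLAN ID
 * @qos: priority setting
 * @vlan_proto: VLAN protocol
 *
 * program VF VLAN id and/or QoS
 **/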
3932int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
3933 u16 vlan_id, u8 qos, __be16 vlan_proto)
3934{
3935 u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
3936 struct i40e_netdev_priv *np = netdev_priv(netdev);
3937 struct i40e_pf *pf = np->vsi->back;
3938 struct i40e_vsi *vsi;
3939 struct i40e_vf *vf;
3940 int ret = 0;
3941
3942
3943 if (vf_id >= pf->num_alloc_vfs) {
3944 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
3945 ret = -EINVAL;
3946 goto error_pvid;
3947 }
3948
3949 if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
3950 dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
3951 ret = -EINVAL;
3952 goto error_pvid;
3953 }
3954
3955 if (vlan_proto != htons(ETH_P_8021Q)) {
3956 dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
3957 ret = -EPROTONOSUPPORT;
3958 goto error_pvid;
3959 }
3960
3961 vf = &(pf->vf[vf_id]);
3962 vsi = pf->vsi[vf->lan_vsi_idx];
3963 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
3964 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
3965 vf_id);
3966 ret = -EAGAIN;
3967 goto error_pvid;
3968 }
3969
3970 if (le16_to_cpu(vsi->info.pvid) == vlanprio)
3971
3972 goto error_pvid;
3973
3974 if (i40e_vsi_has_vlans(vsi)) {
3975 dev_err(&pf->pdev->dev,
3976 "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
3977 vf_id);
3978
3979
3980
3981
3982 i40e_vc_disable_vf(vf);
3983
3984 vsi = pf->vsi[vf->lan_vsi_idx];
3985 }
3986
3987
3988 spin_lock_bh(&vsi->mac_filter_hash_lock);
3989
3990
3991
3992
3993
3994
3995
3996
3997
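	/* If a port VLAN was set and is now being removed (set to zero), or
	 * replaced by a different port VLAN, first re-add the MAC filters
	 * with the wildcard VLAN (I40E_VLAN_ANY) before deleting the old
	 * VLAN filters, otherwise all the VSI's MAC addresses would be
	 * deleted along with them.
	 */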
3998 if ((!(vlan_id || qos) ||
3999 vlanprio != le16_to_cpu(vsi->info.pvid)) &&
4000 vsi->info.pvid) {
4001 ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
4002 if (ret) {
4003 dev_info(&vsi->back->pdev->dev,
4004 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4005 vsi->back->hw.aq.asq_last_status);
4006 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4007 goto error_pvid;
4008 }
4009 }
4010
4011 if (vsi->info.pvid) {
4012
4013 i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
4014 VLAN_VID_MASK));
4015 }
4016
4017 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4018 if (vlan_id || qos)
4019 ret = i40e_vsi_add_pvid(vsi, vlanprio);
4020 else
4021 i40e_vsi_remove_pvid(vsi);
4022 spin_lock_bh(&vsi->mac_filter_hash_lock);
4023
4024 if (vlan_id) {
4025 dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
4026 vlan_id, qos, vf_id);
4027
4028
4029 ret = i40e_add_vlan_all_mac(vsi, vlan_id);
4030 if (ret) {
4031 dev_info(&vsi->back->pdev->dev,
4032 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4033 vsi->back->hw.aq.asq_last_status);
4034 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4035 goto error_pvid;
4036 }
4037
4038
4039 i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
4040 }
4041
4042 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4043
4044
4045 i40e_service_event_schedule(vsi->back);
4046
4047 if (ret) {
4048 dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
4049 goto error_pvid;
4050 }
4051
4052
4053
4054
4055 vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
4056 ret = 0;
4057
4058error_pvid:
4059 return ret;
4060}
4061
4062
4063
4064
4065
4066
4067
4068
4069
4070
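/**
 * i40e_ndo_set_vf_bw
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @min_tx_rate: minimum tx rate (unsupported, must be zero)
 * @max_tx_rate: maximum tx rate
 *
 * configure VF tx rate
 **/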
4071int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
4072 int max_tx_rate)
4073{
4074 struct i40e_netdev_priv *np = netdev_priv(netdev);
4075 struct i40e_pf *pf = np->vsi->back;
4076 struct i40e_vsi *vsi;
4077 struct i40e_vf *vf;
4078 int ret = 0;
4079
4080
4081 if (vf_id >= pf->num_alloc_vfs) {
4082 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id);
4083 ret = -EINVAL;
4084 goto error;
4085 }
4086
	if (min_tx_rate) {
		dev_err(&pf->pdev->dev, "Invalid min tx rate (%d): a minimum tx rate is not supported for VF %d.\n",
			min_tx_rate, vf_id);
		return -EINVAL;
	}
4092
4093 vf = &(pf->vf[vf_id]);
4094 vsi = pf->vsi[vf->lan_vsi_idx];
4095 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4096 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4097 vf_id);
4098 ret = -EAGAIN;
4099 goto error;
4100 }
4101
4102 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
4103 if (ret)
4104 goto error;
4105
4106 vf->tx_rate = max_tx_rate;
4107error:
4108 return ret;
4109}
4110
4111
4112
4113
4114
4115
4116
4117
4118
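/**
 * i40e_ndo_get_vf_config
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ivi: VF configuration structure
 *
 * return VF configuration
 **/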
4119int i40e_ndo_get_vf_config(struct net_device *netdev,
4120 int vf_id, struct ifla_vf_info *ivi)
4121{
4122 struct i40e_netdev_priv *np = netdev_priv(netdev);
4123 struct i40e_vsi *vsi = np->vsi;
4124 struct i40e_pf *pf = vsi->back;
4125 struct i40e_vf *vf;
4126 int ret = 0;
4127
4128
4129 if (vf_id >= pf->num_alloc_vfs) {
4130 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4131 ret = -EINVAL;
4132 goto error_param;
4133 }
4134
4135 vf = &(pf->vf[vf_id]);
4136
4137 vsi = pf->vsi[vf->lan_vsi_idx];
4138 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4139 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4140 vf_id);
4141 ret = -EAGAIN;
4142 goto error_param;
4143 }
4144
4145 ivi->vf = vf_id;
4146
4147 ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
4148
4149 ivi->max_tx_rate = vf->tx_rate;
4150 ivi->min_tx_rate = 0;
4151 ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
4152 ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
4153 I40E_VLAN_PRIORITY_SHIFT;
	if (!vf->link_forced)
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->link_up)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
4160 ivi->spoofchk = vf->spoofchk;
4161 ivi->trusted = vf->trusted;
4162 ret = 0;
4163
4164error_param:
4165 return ret;
4166}
4167
4168
4169
4170
4171
4172
4173
4174
4175
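/**
 * i40e_ndo_set_vf_link_state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link: required link state
 *
 * set the link state of a VF
 **/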
4176int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
4177{
4178 struct i40e_netdev_priv *np = netdev_priv(netdev);
4179 struct i40e_pf *pf = np->vsi->back;
4180 struct virtchnl_pf_event pfe;
4181 struct i40e_hw *hw = &pf->hw;
4182 struct i40e_vf *vf;
4183 int abs_vf_id;
4184 int ret = 0;
4185
4186
4187 if (vf_id >= pf->num_alloc_vfs) {
4188 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4189 ret = -EINVAL;
4190 goto error_out;
4191 }
4192
4193 vf = &pf->vf[vf_id];
4194 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
4195
4196 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
4197 pfe.severity = PF_EVENT_SEVERITY_INFO;
4198
4199 switch (link) {
4200 case IFLA_VF_LINK_STATE_AUTO:
4201 vf->link_forced = false;
4202 pfe.event_data.link_event.link_status =
4203 pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
		pfe.event_data.link_event.link_speed =
			i40e_virtchnl_link_speed(
				pf->hw.phy.link_info.link_speed);
4207 break;
4208 case IFLA_VF_LINK_STATE_ENABLE:
4209 vf->link_forced = true;
4210 vf->link_up = true;
4211 pfe.event_data.link_event.link_status = true;
		pfe.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
4213 break;
4214 case IFLA_VF_LINK_STATE_DISABLE:
4215 vf->link_forced = true;
4216 vf->link_up = false;
4217 pfe.event_data.link_event.link_status = false;
4218 pfe.event_data.link_event.link_speed = 0;
4219 break;
4220 default:
4221 ret = -EINVAL;
4222 goto error_out;
4223 }
4224
4225 i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
4226 0, (u8 *)&pfe, sizeof(pfe), NULL);
4227
4228error_out:
4229 return ret;
4230}
4231
4232
4233
4234
4235
4236
4237
4238
4239
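/**
 * i40e_ndo_set_vf_spoofchk
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @enable: flag to enable or disable feature
 *
 * Enable or disable VF spoof checking
 **/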
4240int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
4241{
4242 struct i40e_netdev_priv *np = netdev_priv(netdev);
4243 struct i40e_vsi *vsi = np->vsi;
4244 struct i40e_pf *pf = vsi->back;
4245 struct i40e_vsi_context ctxt;
4246 struct i40e_hw *hw = &pf->hw;
4247 struct i40e_vf *vf;
4248 int ret = 0;
4249
4250
4251 if (vf_id >= pf->num_alloc_vfs) {
4252 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4253 ret = -EINVAL;
4254 goto out;
4255 }
4256
4257 vf = &(pf->vf[vf_id]);
4258 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4259 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4260 vf_id);
4261 ret = -EAGAIN;
4262 goto out;
4263 }
4264
4265 if (enable == vf->spoofchk)
4266 goto out;
4267
4268 vf->spoofchk = enable;
4269 memset(&ctxt, 0, sizeof(ctxt));
4270 ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
4271 ctxt.pf_num = pf->hw.pf_id;
4272 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
4273 if (enable)
4274 ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
4275 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
4276 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4277 if (ret) {
4278 dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
4279 ret);
4280 ret = -EIO;
4281 }
4282out:
4283 return ret;
4284}
4285
4286
4287
4288
4289
4290
4291
4292
4293
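/**
 * i40e_ndo_set_vf_trust
 * @netdev: network interface device structure of the PF
 * @vf_id: VF identifier
 * @setting: trust setting
 *
 * Enable or disable VF trust setting
 **/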
4294int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
4295{
4296 struct i40e_netdev_priv *np = netdev_priv(netdev);
4297 struct i40e_pf *pf = np->vsi->back;
4298 struct i40e_vf *vf;
4299 int ret = 0;
4300
4301
4302 if (vf_id >= pf->num_alloc_vfs) {
4303 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4304 return -EINVAL;
4305 }
4306
4307 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
4308 dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
4309 return -EINVAL;
4310 }
4311
4312 vf = &pf->vf[vf_id];
4313
4314 if (setting == vf->trusted)
4315 goto out;
4316
4317 vf->trusted = setting;
4318 i40e_vc_disable_vf(vf);
4319 dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
4320 vf_id, setting ? "" : "un");
4321
	if (vf->adq_enabled && !vf->trusted) {
		dev_info(&pf->pdev->dev,
			 "VF %u no longer Trusted, deleting all cloud filters\n",
			 vf_id);
		i40e_del_all_cloud_filters(vf);
	}
4330
4331out:
4332 return ret;
4333}
4334