#include "i40e.h"
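
/*********************notification routines***********************/

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/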
18static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
19 enum virtchnl_ops v_opcode,
20 i40e_status v_retval, u8 *msg,
21 u16 msglen)
22{
23 struct i40e_hw *hw = &pf->hw;
24 struct i40e_vf *vf = pf->vf;
25 int i;
26
27 for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
28 int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
29
30 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
31 !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
32 continue;
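
		/* Ignore the return value on purpose - a given VF may fail,
		 * but we need to keep going and send to the remaining VFs.
		 */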
37 i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
38 msg, msglen, NULL);
39 }
40}
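
/**
 * i40e_vc_link_speed2mbps - convert an AQ link speed to Mbps
 * @link_speed: the speed to convert
 *
 * Returns the speed as an ethtool SPEED_* value, or SPEED_UNKNOWN.
 **/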
49static u32
50i40e_vc_link_speed2mbps(enum i40e_aq_link_speed link_speed)
51{
52 switch (link_speed) {
53 case I40E_LINK_SPEED_100MB:
54 return SPEED_100;
55 case I40E_LINK_SPEED_1GB:
56 return SPEED_1000;
57 case I40E_LINK_SPEED_2_5GB:
58 return SPEED_2500;
59 case I40E_LINK_SPEED_5GB:
60 return SPEED_5000;
61 case I40E_LINK_SPEED_10GB:
62 return SPEED_10000;
63 case I40E_LINK_SPEED_20GB:
64 return SPEED_20000;
65 case I40E_LINK_SPEED_25GB:
66 return SPEED_25000;
67 case I40E_LINK_SPEED_40GB:
68 return SPEED_40000;
69 case I40E_LINK_SPEED_UNKNOWN:
70 return SPEED_UNKNOWN;
71 }
72 return SPEED_UNKNOWN;
73}
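
/**
 * i40e_set_vf_link_state - set the link state reported to a VF
 * @vf: pointer to the VF structure
 * @pfe: pointer to PF event structure
 * @ls: pointer to link status structure
 *
 * Fill the link status and speed in the VIRTCHNL_OP_EVENT payload,
 * honoring a forced link state if one has been set for this VF.
 **/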
83static void i40e_set_vf_link_state(struct i40e_vf *vf,
84 struct virtchnl_pf_event *pfe, struct i40e_link_status *ls)
85{
86 u8 link_status = ls->link_info & I40E_AQ_LINK_UP;
87
88 if (vf->link_forced)
89 link_status = vf->link_up;
90
91 if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
92 pfe->event_data.link_event_adv.link_speed = link_status ?
93 i40e_vc_link_speed2mbps(ls->link_speed) : 0;
94 pfe->event_data.link_event_adv.link_status = link_status;
95 } else {
96 pfe->event_data.link_event.link_speed = link_status ?
97 i40e_virtchnl_link_speed(ls->link_speed) : 0;
98 pfe->event_data.link_event.link_status = link_status;
99 }
100}
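
/**
 * i40e_vc_notify_vf_link_state
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 **/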
108static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
109{
110 struct virtchnl_pf_event pfe;
111 struct i40e_pf *pf = vf->pf;
112 struct i40e_hw *hw = &pf->hw;
113 struct i40e_link_status *ls = &pf->hw.phy.link_info;
114 int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
115
116 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
117 pfe.severity = PF_EVENT_SEVERITY_INFO;
118
119 i40e_set_vf_link_state(vf, &pfe, ls);
120
121 i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
122 0, (u8 *)&pfe, sizeof(pfe), NULL);
123}
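
/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the PF structure
 *
 * send a link status message to all VFs on a given PF
 **/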
131void i40e_vc_notify_link_state(struct i40e_pf *pf)
132{
133 int i;
134
135 for (i = 0; i < pf->num_alloc_vfs; i++)
136 i40e_vc_notify_vf_link_state(&pf->vf[i]);
137}
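
/**
 * i40e_vc_notify_reset
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/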
145void i40e_vc_notify_reset(struct i40e_pf *pf)
146{
147 struct virtchnl_pf_event pfe;
148
149 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
150 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
151 i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
152 (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
153}
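
/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the VF structure
 *
 * indicate a pending reset to the given VF
 **/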
161void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
162{
163 struct virtchnl_pf_event pfe;
164 int abs_vf_id;
165
166
167 if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
168 return;
169
170
171 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
172 !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
173 return;
174
175 abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;
176
177 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
178 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
179 i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
180 0, (u8 *)&pfe,
181 sizeof(struct virtchnl_pf_event), NULL);
182}
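
/**
 * i40e_vc_disable_vf
 * @vf: pointer to the VF info
 *
 * Disable the VF through a SW reset.
 **/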
191static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
192{
193 struct i40e_pf *pf = vf->pf;
194 int i;
195
196 i40e_vc_notify_vf_reset(vf);
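
	/* We want to ensure that an actual reset occurs initiated after this
	 * function was called. However, we do not want to wait forever, so
	 * we'll give a reasonable time and print a message if we failed to
	 * ensure a reset.
	 */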
203 for (i = 0; i < 20; i++) {
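		/* If the PF is releasing its VFs, a reset of this VF is
		 * impossible, so just leave it.
		 */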
207 if (test_bit(__I40E_VFS_RELEASING, pf->state))
208 return;
209 if (i40e_reset_vf(vf, false))
210 return;
211 usleep_range(10000, 20000);
212 }
213
214 dev_warn(&vf->pf->pdev->dev,
215 "Failed to initiate reset for VF %d after 200 milliseconds\n",
216 vf->vf_id);
217}
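
/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI id
 *
 * check for the valid VSI id
 **/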
226static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
227{
228 struct i40e_pf *pf = vf->pf;
229 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
230
231 return (vsi && (vsi->vf_id == vf->vf_id));
232}
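
/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: VSI id
 * @qid: VSI relative queue id
 *
 * check for the valid queue id
 **/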
242static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
243 u16 qid)
244{
245 struct i40e_pf *pf = vf->pf;
246 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
247
248 return (vsi && (qid < vsi->alloc_queue_pairs));
249}
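
/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the VF info
 * @vector_id: VF relative vector id
 *
 * check for the valid vector id
 **/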
258static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
259{
260 struct i40e_pf *pf = vf->pf;
261
262 return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
263}
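
/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue id
 *
 * return PF relative queue id
 **/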
275static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
276 u8 vsi_queue_id)
277{
278 struct i40e_pf *pf = vf->pf;
279 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
280 u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;
281
282 if (!vsi)
283 return pf_queue_id;
284
285 if (le16_to_cpu(vsi->info.mapping_flags) &
286 I40E_AQ_VSI_QUE_MAP_NONCONTIG)
287 pf_queue_id =
288 le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
289 else
290 pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
291 vsi_queue_id;
292
293 return pf_queue_id;
294}
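
/**
 * i40e_get_real_pf_qid
 * @vf: pointer to the VF info
 * @vsi_id: VSI id
 * @queue_id: VF relative queue number
 *
 * wrapper around i40e_vc_get_pf_queue_id() that also handles the ADq case
 **/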
304static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
305{
306 int i;
307
308 if (vf->adq_enabled) {
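		/* Although the VF considers all the queues (up to 16) as its
		 * own, they may actually belong to different VSIs (up to 4
		 * with ADq). Find which VSI this queue belongs to.
		 */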
313 for (i = 0; i < vf->num_tc; i++) {
314 if (queue_id < vf->ch[i].num_qps) {
315 vsi_id = vf->ch[i].vsi_id;
316 break;
317 }
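			/* make the queue id relative to the VSI that
			 * actually owns it
			 */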
321 queue_id -= vf->ch[i].num_qps;
322 }
323 }
324
325 return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
326}
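
/**
 * i40e_config_irq_link_list
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as given by the FW
 * @vecmap: irq map info
 *
 * configure the IRQ to queue link list from the map
 **/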
336static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
337 struct virtchnl_vector_map *vecmap)
338{
339 unsigned long linklistmap = 0, tempmap;
340 struct i40e_pf *pf = vf->pf;
341 struct i40e_hw *hw = &pf->hw;
342 u16 vsi_queue_id, pf_queue_id;
343 enum i40e_queue_type qtype;
344 u16 next_q, vector_id, size;
345 u32 reg, reg_idx;
346 u16 itr_idx = 0;
347
348 vector_id = vecmap->vector_id;
349
350 if (0 == vector_id)
351 reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
352 else
353 reg_idx = I40E_VPINT_LNKLSTN(
354 ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
355 (vector_id - 1));
356
357 if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
358
359 wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
360 goto irq_list_done;
361 }
362 tempmap = vecmap->rxq_map;
363 for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
364 linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
365 vsi_queue_id));
366 }
367
368 tempmap = vecmap->txq_map;
369 for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
370 linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
371 vsi_queue_id + 1));
372 }
373
374 size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
375 next_q = find_first_bit(&linklistmap, size);
376 if (unlikely(next_q == size))
377 goto irq_list_done;
378
379 vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
380 qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
381 pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
382 reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
383
384 wr32(hw, reg_idx, reg);
385
386 while (next_q < size) {
387 switch (qtype) {
388 case I40E_QUEUE_TYPE_RX:
389 reg_idx = I40E_QINT_RQCTL(pf_queue_id);
390 itr_idx = vecmap->rxitr_idx;
391 break;
392 case I40E_QUEUE_TYPE_TX:
393 reg_idx = I40E_QINT_TQCTL(pf_queue_id);
394 itr_idx = vecmap->txitr_idx;
395 break;
396 default:
397 break;
398 }
399
400 next_q = find_next_bit(&linklistmap, size, next_q + 1);
401 if (next_q < size) {
402 vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
403 qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
404 pf_queue_id = i40e_get_real_pf_qid(vf,
405 vsi_id,
406 vsi_queue_id);
407 } else {
408 pf_queue_id = I40E_QUEUE_END_OF_LIST;
409 qtype = 0;
410 }
411
412
413 reg = (vector_id) |
414 (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
415 (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
416 BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
417 (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
418 wr32(hw, reg_idx, reg);
419 }
420
421
422
423
424 if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
425 (vector_id == 0)) {
426 reg = rd32(hw, I40E_GLINT_CTL);
427 if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
428 reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
429 wr32(hw, I40E_GLINT_CTL, reg);
430 }
431 }
432
433irq_list_done:
434 i40e_flush(hw);
435}
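
/**
 * i40e_release_iwarp_qvlist
 * @vf: pointer to the VF
 *
 * Restore the LNKLSTN registers programmed for the iWarp queue vectors
 * and free the stored queue-vector list.
 **/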
442static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
443{
444 struct i40e_pf *pf = vf->pf;
445 struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
446 u32 msix_vf;
447 u32 i;
448
449 if (!vf->qvlist_info)
450 return;
451
452 msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
453 for (i = 0; i < qvlist_info->num_vectors; i++) {
454 struct virtchnl_iwarp_qv_info *qv_info;
455 u32 next_q_index, next_q_type;
456 struct i40e_hw *hw = &pf->hw;
457 u32 v_idx, reg_idx, reg;
458
459 qv_info = &qvlist_info->qv_info[i];
460 if (!qv_info)
461 continue;
462 v_idx = qv_info->v_idx;
463 if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
464
465
466
467 reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
468 reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
469 next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
470 >> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
471 next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
472 >> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;
473
474 reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
475 reg = (next_q_index &
476 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
477 (next_q_type <<
478 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
479
480 wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
481 }
482 }
483 kfree(vf->qvlist_info);
484 vf->qvlist_info = NULL;
485}
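
/**
 * i40e_config_iwarp_qvlist
 * @vf: pointer to the VF info
 * @qvlist_info: queue and vector list
 *
 * Program the CEQ/AEQ interrupt mappings requested by the VF's iWarp
 * client. Return 0 on success or a negative error code.
 **/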
494static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
495 struct virtchnl_iwarp_qvlist_info *qvlist_info)
496{
497 struct i40e_pf *pf = vf->pf;
498 struct i40e_hw *hw = &pf->hw;
499 struct virtchnl_iwarp_qv_info *qv_info;
500 u32 v_idx, i, reg_idx, reg;
501 u32 next_q_idx, next_q_type;
502 u32 msix_vf;
503 int ret = 0;
504
505 msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
506
507 if (qvlist_info->num_vectors > msix_vf) {
508 dev_warn(&pf->pdev->dev,
509 "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n",
510 qvlist_info->num_vectors,
511 msix_vf);
512 ret = -EINVAL;
513 goto err_out;
514 }
515
516 kfree(vf->qvlist_info);
517 vf->qvlist_info = kzalloc(struct_size(vf->qvlist_info, qv_info,
518 qvlist_info->num_vectors - 1),
519 GFP_KERNEL);
520 if (!vf->qvlist_info) {
521 ret = -ENOMEM;
522 goto err_out;
523 }
524 vf->qvlist_info->num_vectors = qvlist_info->num_vectors;
525
526 msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
527 for (i = 0; i < qvlist_info->num_vectors; i++) {
528 qv_info = &qvlist_info->qv_info[i];
529 if (!qv_info)
530 continue;
531
532
533 if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) {
534 ret = -EINVAL;
535 goto err_free;
536 }
537
538 v_idx = qv_info->v_idx;
539
540 vf->qvlist_info->qv_info[i] = *qv_info;
541
542 reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
543
544
545
546
547 reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
548 next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
549 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
550 next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
551 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
552
553 if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
554 reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
555 reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
556 (v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
557 (qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
558 (next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
559 (next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
560 wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);
561
562 reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
563 reg = (qv_info->ceq_idx &
564 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
565 (I40E_QUEUE_TYPE_PE_CEQ <<
566 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
567 wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
568 }
569
570 if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
571 reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
572 (v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
573 (qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));
574
575 wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
576 }
577 }
578
579 return 0;
580err_free:
581 kfree(vf->qvlist_info);
582 vf->qvlist_info = NULL;
583err_out:
584 return ret;
585}
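
/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/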
596static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
597 u16 vsi_queue_id,
598 struct virtchnl_txq_info *info)
599{
600 struct i40e_pf *pf = vf->pf;
601 struct i40e_hw *hw = &pf->hw;
602 struct i40e_hmc_obj_txq tx_ctx;
603 struct i40e_vsi *vsi;
604 u16 pf_queue_id;
605 u32 qtx_ctl;
606 int ret = 0;
607
608 if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
609 ret = -ENOENT;
610 goto error_context;
611 }
612 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
613 vsi = i40e_find_vsi_from_id(pf, vsi_id);
614 if (!vsi) {
615 ret = -ENOENT;
616 goto error_context;
617 }
618
619
620 memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));
621
622
623 tx_ctx.base = info->dma_ring_addr / 128;
624 tx_ctx.qlen = info->ring_len;
625 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
626 tx_ctx.rdylist_act = 0;
627 tx_ctx.head_wb_ena = info->headwb_enabled;
628 tx_ctx.head_wb_addr = info->dma_headwb_addr;
629
630
631 ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
632 if (ret) {
633 dev_err(&pf->pdev->dev,
634 "Failed to clear VF LAN Tx queue context %d, error: %d\n",
635 pf_queue_id, ret);
636 ret = -ENOENT;
637 goto error_context;
638 }
639
640
641 ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
642 if (ret) {
643 dev_err(&pf->pdev->dev,
644 "Failed to set VF LAN Tx queue context %d error: %d\n",
645 pf_queue_id, ret);
646 ret = -ENOENT;
647 goto error_context;
648 }
649
650
651 qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
652 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
653 & I40E_QTX_CTL_PF_INDX_MASK);
654 qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
655 << I40E_QTX_CTL_VFVM_INDX_SHIFT)
656 & I40E_QTX_CTL_VFVM_INDX_MASK);
657 wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
658 i40e_flush(hw);
659
660error_context:
661 return ret;
662}
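
/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/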
673static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
674 u16 vsi_queue_id,
675 struct virtchnl_rxq_info *info)
676{
677 struct i40e_pf *pf = vf->pf;
678 struct i40e_hw *hw = &pf->hw;
679 struct i40e_hmc_obj_rxq rx_ctx;
680 u16 pf_queue_id;
681 int ret = 0;
682
683 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
684
685
686 memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
687
688
689 rx_ctx.base = info->dma_ring_addr / 128;
690 rx_ctx.qlen = info->ring_len;
691
692 if (info->splithdr_enabled) {
693 rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
694 I40E_RX_SPLIT_IP |
695 I40E_RX_SPLIT_TCP_UDP |
696 I40E_RX_SPLIT_SCTP;
697
698 if (info->hdr_size > ((2 * 1024) - 64)) {
699 ret = -EINVAL;
700 goto error_param;
701 }
702 rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
703
704
705 rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
706 }
707
708
709 if (info->databuffer_size > ((16 * 1024) - 128)) {
710 ret = -EINVAL;
711 goto error_param;
712 }
713 rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
714
715
716 if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
717 ret = -EINVAL;
718 goto error_param;
719 }
720 rx_ctx.rxmax = info->max_pkt_size;
721
722
723 rx_ctx.dsize = 1;
724
725
726 rx_ctx.lrxqthresh = 1;
727 rx_ctx.crcstrip = 1;
728 rx_ctx.prefena = 1;
729 rx_ctx.l2tsel = 1;
730
731
732 ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
733 if (ret) {
734 dev_err(&pf->pdev->dev,
735 "Failed to clear VF LAN Rx queue context %d, error: %d\n",
736 pf_queue_id, ret);
737 ret = -ENOENT;
738 goto error_param;
739 }
740
741
742 ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
743 if (ret) {
744 dev_err(&pf->pdev->dev,
745 "Failed to set VF LAN Rx queue context %d error: %d\n",
746 pf_queue_id, ret);
747 ret = -ENOENT;
748 goto error_param;
749 }
750
751error_param:
752 return ret;
753}
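
/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the VF info
 * @idx: VSI index, applies only for ADq mode, zero otherwise
 *
 * alloc VF vsi context & resources
 **/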
762static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
763{
764 struct i40e_mac_filter *f = NULL;
765 struct i40e_pf *pf = vf->pf;
766 struct i40e_vsi *vsi;
767 u64 max_tx_rate = 0;
768 int ret = 0;
769
770 vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
771 vf->vf_id);
772
773 if (!vsi) {
774 dev_err(&pf->pdev->dev,
775 "add vsi failed for VF %d, aq_err %d\n",
776 vf->vf_id, pf->hw.aq.asq_last_status);
777 ret = -ENOENT;
778 goto error_alloc_vsi_res;
779 }
780
781 if (!idx) {
782 u64 hena = i40e_pf_get_default_rss_hena(pf);
783 u8 broadcast[ETH_ALEN];
784
785 vf->lan_vsi_idx = vsi->idx;
786 vf->lan_vsi_id = vsi->id;
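
		/* If a port VLAN was configured and the VF driver was then
		 * removed, the VSI port VLAN configuration was destroyed.
		 * Check for a port VLAN and restore the VSI configuration
		 * if needed.
		 */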
793 if (vf->port_vlan_id)
794 i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
795
796 spin_lock_bh(&vsi->mac_filter_hash_lock);
797 if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
798 f = i40e_add_mac_filter(vsi,
799 vf->default_lan_addr.addr);
800 if (!f)
801 dev_info(&pf->pdev->dev,
802 "Could not add MAC filter %pM for VF %d\n",
803 vf->default_lan_addr.addr, vf->vf_id);
804 }
805 eth_broadcast_addr(broadcast);
806 f = i40e_add_mac_filter(vsi, broadcast);
807 if (!f)
808 dev_info(&pf->pdev->dev,
809 "Could not allocate VF broadcast filter\n");
810 spin_unlock_bh(&vsi->mac_filter_hash_lock);
811 wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
812 wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
813
814 ret = i40e_sync_vsi_filters(vsi);
815 if (ret)
816 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
817 }
818
819
820 if (vf->adq_enabled) {
821 vf->ch[idx].vsi_idx = vsi->idx;
822 vf->ch[idx].vsi_id = vsi->id;
823 }
824
825
826 if (vf->tx_rate) {
827 max_tx_rate = vf->tx_rate;
828 } else if (vf->ch[idx].max_tx_rate) {
829 max_tx_rate = vf->ch[idx].max_tx_rate;
830 }
831
832 if (max_tx_rate) {
833 max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
834 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
835 max_tx_rate, 0, NULL);
836 if (ret)
837 dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
838 vf->vf_id, ret);
839 }
840
841error_alloc_vsi_res:
842 return ret;
843}
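
/**
 * i40e_map_pf_queues_to_vsi
 * @vf: pointer to the VF info
 *
 * PF maps LAN queue pairs to a VF by programming VSILAN_QTABLE and
 * VPLAN_QTABLE. This function handles the first part, VSILAN_QTABLE,
 * mapping PF queues to the VSI.
 **/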
852static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
853{
854 struct i40e_pf *pf = vf->pf;
855 struct i40e_hw *hw = &pf->hw;
856 u32 reg, num_tc = 1;
857 u16 vsi_id, qps;
858 int i, j;
859
860 if (vf->adq_enabled)
861 num_tc = vf->num_tc;
862
863 for (i = 0; i < num_tc; i++) {
864 if (vf->adq_enabled) {
865 qps = vf->ch[i].num_qps;
866 vsi_id = vf->ch[i].vsi_id;
867 } else {
868 qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
869 vsi_id = vf->lan_vsi_id;
870 }
871
872 for (j = 0; j < 7; j++) {
873 if (j * 2 >= qps) {
874
875 reg = 0x07FF07FF;
876 } else {
877 u16 qid = i40e_vc_get_pf_queue_id(vf,
878 vsi_id,
879 j * 2);
880 reg = qid;
881 qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
882 (j * 2) + 1);
883 reg |= qid << 16;
884 }
885 i40e_write_rx_ctl(hw,
886 I40E_VSILAN_QTABLE(j, vsi_id),
887 reg);
888 }
889 }
890}
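
/**
 * i40e_map_pf_to_vf_queues
 * @vf: pointer to the VF info
 *
 * PF maps LAN queue pairs to a VF by programming VSILAN_QTABLE and
 * VPLAN_QTABLE. This function handles the second part, VPLAN_QTABLE,
 * and completes the VF mappings.
 **/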
899static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
900{
901 struct i40e_pf *pf = vf->pf;
902 struct i40e_hw *hw = &pf->hw;
903 u32 reg, total_qps = 0;
904 u32 qps, num_tc = 1;
905 u16 vsi_id, qid;
906 int i, j;
907
908 if (vf->adq_enabled)
909 num_tc = vf->num_tc;
910
911 for (i = 0; i < num_tc; i++) {
912 if (vf->adq_enabled) {
913 qps = vf->ch[i].num_qps;
914 vsi_id = vf->ch[i].vsi_id;
915 } else {
916 qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
917 vsi_id = vf->lan_vsi_id;
918 }
919
920 for (j = 0; j < qps; j++) {
921 qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);
922
923 reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
924 wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
925 reg);
926 total_qps++;
927 }
928 }
929}
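
/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the VF info
 *
 * enable VF mappings
 **/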
937static void i40e_enable_vf_mappings(struct i40e_vf *vf)
938{
939 struct i40e_pf *pf = vf->pf;
940 struct i40e_hw *hw = &pf->hw;
941 u32 reg;
942
943
944
945
946
947 i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
948 I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
949
950
951 reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
952 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
953
954 i40e_map_pf_to_vf_queues(vf);
955 i40e_map_pf_queues_to_vsi(vf);
956
957 i40e_flush(hw);
958}
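
/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the VF info
 *
 * disable VF mappings
 **/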
966static void i40e_disable_vf_mappings(struct i40e_vf *vf)
967{
968 struct i40e_pf *pf = vf->pf;
969 struct i40e_hw *hw = &pf->hw;
970 int i;
971
972
973 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
974 for (i = 0; i < I40E_MAX_VSI_QP; i++)
975 wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
976 I40E_QUEUE_END_OF_LIST);
977 i40e_flush(hw);
978}
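
/**
 * i40e_free_vf_res
 * @vf: pointer to the VF info
 *
 * free VF resources
 **/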
986static void i40e_free_vf_res(struct i40e_vf *vf)
987{
988 struct i40e_pf *pf = vf->pf;
989 struct i40e_hw *hw = &pf->hw;
990 u32 reg_idx, reg;
991 int i, j, msix_vf;
992
993
994
995
996 clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
997
998
999
1000
1001 if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
1002 pf->queues_left += vf->num_queue_pairs -
1003 I40E_DEFAULT_QUEUES_PER_VF;
1004 }
1005
1006
1007 if (vf->lan_vsi_idx) {
1008 i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
1009 vf->lan_vsi_idx = 0;
1010 vf->lan_vsi_id = 0;
1011 }
1012
1013
1014 if (vf->adq_enabled && vf->ch[0].vsi_idx) {
1015 for (j = 0; j < vf->num_tc; j++) {
1016
1017
1018
1019
1020 if (j)
1021 i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
1022 vf->ch[j].vsi_idx = 0;
1023 vf->ch[j].vsi_id = 0;
1024 }
1025 }
1026 msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
1027
1028
1029 for (i = 0; i < msix_vf; i++) {
1030
1031 if (0 == i)
1032 reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
1033 else
1034 reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
1035 (vf->vf_id))
1036 + (i - 1));
1037 wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
1038 i40e_flush(hw);
1039 }
1040
1041
1042 for (i = 0; i < msix_vf; i++) {
1043
1044 if (0 == i)
1045 reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
1046 else
1047 reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
1048 (vf->vf_id))
1049 + (i - 1));
1050 reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
1051 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
1052 wr32(hw, reg_idx, reg);
1053 i40e_flush(hw);
1054 }
1055
1056 vf->num_queue_pairs = 0;
1057 clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
1058 clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
1059}
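
/**
 * i40e_alloc_vf_res
 * @vf: pointer to the VF info
 *
 * allocate VF resources
 **/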
1067static int i40e_alloc_vf_res(struct i40e_vf *vf)
1068{
1069 struct i40e_pf *pf = vf->pf;
1070 int total_queue_pairs = 0;
1071 int ret, idx;
1072
1073 if (vf->num_req_queues &&
1074 vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
1075 pf->num_vf_qps = vf->num_req_queues;
1076 else
1077 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
1078
1079
1080 ret = i40e_alloc_vsi_res(vf, 0);
1081 if (ret)
1082 goto error_alloc;
1083 total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
1084
1085
1086 if (vf->adq_enabled) {
1087 if (pf->queues_left >=
1088 (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
1089
1090 for (idx = 1; idx < vf->num_tc; idx++) {
1091 ret = i40e_alloc_vsi_res(vf, idx);
1092 if (ret)
1093 goto error_alloc;
1094 }
1095
1096 total_queue_pairs = I40E_MAX_VF_QUEUES;
1097 } else {
1098 dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
1099 vf->vf_id);
1100 vf->adq_enabled = false;
1101 }
1102 }
1103
1104
1105
1106
1107
1108
1109 if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
1110 pf->queues_left -=
1111 total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;
1112
1113 if (vf->trusted)
1114 set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
1115 else
1116 clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
1117
1118
1119
1120
1121 vf->num_queue_pairs = total_queue_pairs;
1122
1123
1124 set_bit(I40E_VF_STATE_INIT, &vf->vf_states);
1125
1126error_alloc:
1127 if (ret)
1128 i40e_free_vf_res(vf);
1129
1130 return ret;
1131}
1132
#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_MASK 0x20
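
/**
 * i40e_quiesce_vf_pci
 * @vf: pointer to the VF structure
 *
 * Wait for the VF's pending PCI transactions to clear after reset.
 * Returns 0 when they clear, -EIO on timeout.
 **/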
1142static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
1143{
1144 struct i40e_pf *pf = vf->pf;
1145 struct i40e_hw *hw = &pf->hw;
1146 int vf_abs_id, i;
1147 u32 reg;
1148
1149 vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
1150
1151 wr32(hw, I40E_PF_PCI_CIAA,
1152 VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
1153 for (i = 0; i < 100; i++) {
1154 reg = rd32(hw, I40E_PF_PCI_CIAD);
1155 if ((reg & VF_TRANS_PENDING_MASK) == 0)
1156 return 0;
1157 udelay(1);
1158 }
1159 return -EIO;
1160}
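
/**
 * i40e_getnum_vf_vsi_vlan_filters
 * @vsi: pointer to the VSI
 *
 * called to get the number of VLANs offloaded on this VF
 **/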
1168static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
1169{
1170 struct i40e_mac_filter *f;
1171 u16 num_vlans = 0, bkt;
1172
1173 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
1174 if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
1175 num_vlans++;
1176 }
1177
1178 return num_vlans;
1179}
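
/**
 * i40e_get_vlan_list_sync
 * @vsi: pointer to the VSI
 * @num_vlans: number of VLANs in mac_filter_hash, returned to caller
 * @vlan_list: list of VLANs present in mac_filter_hash, returned to caller.
 *             This array is allocated here, but has to be freed in caller.
 *
 * Called to get the number of VLANs and the VLAN list present in
 * mac_filter_hash.
 **/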
1190static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, u16 *num_vlans,
1191 s16 **vlan_list)
1192{
1193 struct i40e_mac_filter *f;
1194 int i = 0;
1195 int bkt;
1196
1197 spin_lock_bh(&vsi->mac_filter_hash_lock);
1198 *num_vlans = i40e_getnum_vf_vsi_vlan_filters(vsi);
1199 *vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list), GFP_ATOMIC);
1200 if (!(*vlan_list))
1201 goto err;
1202
1203 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
1204 if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
1205 continue;
1206 (*vlan_list)[i++] = f->vlan;
1207 }
1208err:
1209 spin_unlock_bh(&vsi->mac_filter_hash_lock);
1210}
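
/**
 * i40e_set_vsi_promisc
 * @vf: pointer to the VF struct
 * @seid: VSI number
 * @multi_enable: set MAC L2 layer multicast promiscuous enable/disable
 *                for a given VLAN
 * @unicast_enable: set MAC L2 layer unicast promiscuous enable/disable
 *                  for a given VLAN
 * @vl: List of VLANs - apply filter for given VLANs
 * @num_vlans: Number of elements in @vl
 **/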
1223static i40e_status
1224i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
1225 bool unicast_enable, s16 *vl, u16 num_vlans)
1226{
1227 i40e_status aq_ret, aq_tmp = 0;
1228 struct i40e_pf *pf = vf->pf;
1229 struct i40e_hw *hw = &pf->hw;
1230 int i;
1231
1232
1233 if (!num_vlans || !vl) {
1234 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, seid,
1235 multi_enable,
1236 NULL);
1237 if (aq_ret) {
1238 int aq_err = pf->hw.aq.asq_last_status;
1239
1240 dev_err(&pf->pdev->dev,
1241 "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
1242 vf->vf_id,
1243 i40e_stat_str(&pf->hw, aq_ret),
1244 i40e_aq_str(&pf->hw, aq_err));
1245
1246 return aq_ret;
1247 }
1248
1249 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, seid,
1250 unicast_enable,
1251 NULL, true);
1252
1253 if (aq_ret) {
1254 int aq_err = pf->hw.aq.asq_last_status;
1255
1256 dev_err(&pf->pdev->dev,
1257 "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
1258 vf->vf_id,
1259 i40e_stat_str(&pf->hw, aq_ret),
1260 i40e_aq_str(&pf->hw, aq_err));
1261 }
1262
1263 return aq_ret;
1264 }
1265
1266 for (i = 0; i < num_vlans; i++) {
1267 aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, seid,
1268 multi_enable,
1269 vl[i], NULL);
1270 if (aq_ret) {
1271 int aq_err = pf->hw.aq.asq_last_status;
1272
1273 dev_err(&pf->pdev->dev,
1274 "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
1275 vf->vf_id,
1276 i40e_stat_str(&pf->hw, aq_ret),
1277 i40e_aq_str(&pf->hw, aq_err));
1278
1279 if (!aq_tmp)
1280 aq_tmp = aq_ret;
1281 }
1282
1283 aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid,
1284 unicast_enable,
1285 vl[i], NULL);
1286 if (aq_ret) {
1287 int aq_err = pf->hw.aq.asq_last_status;
1288
1289 dev_err(&pf->pdev->dev,
1290 "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
1291 vf->vf_id,
1292 i40e_stat_str(&pf->hw, aq_ret),
1293 i40e_aq_str(&pf->hw, aq_err));
1294
1295 if (!aq_tmp)
1296 aq_tmp = aq_ret;
1297 }
1298 }
1299
1300 if (aq_tmp)
1301 aq_ret = aq_tmp;
1302
1303 return aq_ret;
1304}
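
/**
 * i40e_config_vf_promiscuous_mode
 * @vf: pointer to the VF info
 * @vsi_id: VSI id
 * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
 * @alluni: set MAC L2 layer unicast promiscuous enable/disable
 *
 * Called from the VF to configure the promiscuous mode of VF VSIs and
 * from the VF reset path to reset promiscuous mode.
 **/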
1316static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
1317 u16 vsi_id,
1318 bool allmulti,
1319 bool alluni)
1320{
1321 i40e_status aq_ret = I40E_SUCCESS;
1322 struct i40e_pf *pf = vf->pf;
1323 struct i40e_vsi *vsi;
1324 u16 num_vlans;
1325 s16 *vl;
1326
1327 vsi = i40e_find_vsi_from_id(pf, vsi_id);
1328 if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
1329 return I40E_ERR_PARAM;
1330
1331 if (vf->port_vlan_id) {
1332 aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti,
1333 alluni, &vf->port_vlan_id, 1);
1334 return aq_ret;
1335 } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
1336 i40e_get_vlan_list_sync(vsi, &num_vlans, &vl);
1337
1338 if (!vl)
1339 return I40E_ERR_NO_MEMORY;
1340
1341 aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
1342 vl, num_vlans);
1343 kfree(vl);
1344 return aq_ret;
1345 }
1346
1347
1348 aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
1349 NULL, 0);
1350 return aq_ret;
1351}
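
/**
 * i40e_trigger_vf_reset
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 **/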
1362static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
1363{
1364 struct i40e_pf *pf = vf->pf;
1365 struct i40e_hw *hw = &pf->hw;
1366 u32 reg, reg_idx, bit_idx;
1367
1368
1369 clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
1370
1371
1372
1373
1374
1375
1376
1377 clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
1378
1379
1380
1381
1382 if (!flr) {
1383
1384 reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
1385 reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
1386 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
1387 i40e_flush(hw);
1388 }
1389
1390 reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
1391 bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
1392 wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
1393 i40e_flush(hw);
1394
1395 if (i40e_quiesce_vf_pci(vf))
1396 dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
1397 vf->vf_id);
1398}
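
/**
 * i40e_cleanup_reset_vf
 * @vf: pointer to the VF structure
 *
 * Cleanup a VF after the hardware reset is finished. Expects the caller to
 * have verified whether the reset is finished properly, and ensure the
 * minimum amount of wait time has passed.
 **/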
1408static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
1409{
1410 struct i40e_pf *pf = vf->pf;
1411 struct i40e_hw *hw = &pf->hw;
1412 u32 reg;
1413
1414
1415 i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false);
1416
1417
1418 i40e_free_vf_res(vf);
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430 reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
1431 reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
1432 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
1433
1434
1435 if (!i40e_alloc_vf_res(vf)) {
1436 int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1437 i40e_enable_vf_mappings(vf);
1438 set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
1439 clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
1440
1441 if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
1442 &vf->vf_states))
1443 i40e_notify_client_of_vf_reset(pf, abs_vf_id);
1444 vf->num_vlan = 0;
1445 }
1446
1447
1448
1449
1450
1451 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
1452}
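
/**
 * i40e_reset_vf
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Reset the given VF. Returns true once the reset has been handled, and
 * returns true immediately if VF resets are disabled or another reset is
 * already in progress.
 **/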
1462bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
1463{
1464 struct i40e_pf *pf = vf->pf;
1465 struct i40e_hw *hw = &pf->hw;
1466 bool rsd = false;
1467 u32 reg;
1468 int i;
1469
1470 if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
1471 return true;
1472
1473
1474
1475
1476 if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1477 return true;
1478
1479 i40e_trigger_vf_reset(vf, flr);
1480
1481
1482
1483
1484 for (i = 0; i < 10; i++) {
1485
1486
1487
1488
1489
1490 usleep_range(10000, 20000);
1491 reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
1492 if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
1493 rsd = true;
1494 break;
1495 }
1496 }
1497
1498 if (flr)
1499 usleep_range(10000, 20000);
1500
1501 if (!rsd)
1502 dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
1503 vf->vf_id);
1504 usleep_range(10000, 20000);
1505
1506
1507 if (vf->lan_vsi_idx != 0)
1508 i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
1509
1510 i40e_cleanup_reset_vf(vf);
1511
1512 i40e_flush(hw);
1513 clear_bit(__I40E_VF_DISABLE, pf->state);
1514
1515 return true;
1516}
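
/**
 * i40e_reset_all_vfs
 * @pf: pointer to the PF structure
 * @flr: VFLR was issued or not
 *
 * Reset all allocated VFs in one go. First, tell the hardware to reset each
 * VF, then do all the waiting in one chunk, and finally finish restoring each
 * VF after the wait. This is useful during PF routines which need to reset
 * all VFs, as otherwise it must perform these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 **/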
1530bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
1531{
1532 struct i40e_hw *hw = &pf->hw;
1533 struct i40e_vf *vf;
1534 int i, v;
1535 u32 reg;
1536
1537
1538 if (!pf->num_alloc_vfs)
1539 return false;
1540
1541
1542 if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1543 return false;
1544
1545
1546 for (v = 0; v < pf->num_alloc_vfs; v++)
1547 i40e_trigger_vf_reset(&pf->vf[v], flr);
1548
1549
1550
1551
1552
1553
1554
1555 for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
1556 usleep_range(10000, 20000);
1557
1558
1559
1560
1561 while (v < pf->num_alloc_vfs) {
1562 vf = &pf->vf[v];
1563 reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
1564 if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
1565 break;
1566
1567
1568
1569
1570 v++;
1571 }
1572 }
1573
1574 if (flr)
1575 usleep_range(10000, 20000);
1576
1577
1578
1579
1580 if (v < pf->num_alloc_vfs)
1581 dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
1582 pf->vf[v].vf_id);
1583 usleep_range(10000, 20000);
1584
1585
1586
1587
1588 for (v = 0; v < pf->num_alloc_vfs; v++) {
1589
1590 if (pf->vf[v].lan_vsi_idx == 0)
1591 continue;
1592
1593 i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
1594 }
1595
1596
1597
1598
1599 for (v = 0; v < pf->num_alloc_vfs; v++) {
1600
1601 if (pf->vf[v].lan_vsi_idx == 0)
1602 continue;
1603
1604 i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
1605 }
1606
1607
1608
1609
1610 mdelay(50);
1611
1612
1613 for (v = 0; v < pf->num_alloc_vfs; v++)
1614 i40e_cleanup_reset_vf(&pf->vf[v]);
1615
1616 i40e_flush(hw);
1617 clear_bit(__I40E_VF_DISABLE, pf->state);
1618
1619 return true;
1620}
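
/**
 * i40e_free_vfs
 * @pf: pointer to the PF structure
 *
 * free VF resources
 **/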
1628void i40e_free_vfs(struct i40e_pf *pf)
1629{
1630 struct i40e_hw *hw = &pf->hw;
1631 u32 reg_idx, bit_idx;
1632 int i, tmp, vf_id;
1633
1634 if (!pf->vf)
1635 return;
1636
1637 set_bit(__I40E_VFS_RELEASING, pf->state);
1638 while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1639 usleep_range(1000, 2000);
1640
1641 i40e_notify_client_of_vf_enable(pf, 0);
1642
1643
1644
1645
1646
1647 if (!pci_vfs_assigned(pf->pdev))
1648 pci_disable_sriov(pf->pdev);
1649 else
1650 dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
1651
1652
1653 for (i = 0; i < pf->num_alloc_vfs; i++) {
1654 if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1655 continue;
1656
1657 i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
1658 }
1659
1660 for (i = 0; i < pf->num_alloc_vfs; i++) {
1661 if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1662 continue;
1663
1664 i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
1665 }
1666
1667
1668 tmp = pf->num_alloc_vfs;
1669 pf->num_alloc_vfs = 0;
1670 for (i = 0; i < tmp; i++) {
1671 if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1672 i40e_free_vf_res(&pf->vf[i]);
1673
1674 i40e_disable_vf_mappings(&pf->vf[i]);
1675 }
1676
1677 kfree(pf->vf);
1678 pf->vf = NULL;
1679
1680
1681
1682
1683
1684 if (!pci_vfs_assigned(pf->pdev)) {
1685
1686
1687
1688 for (vf_id = 0; vf_id < tmp; vf_id++) {
1689 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
1690 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
1691 wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
1692 }
1693 }
1694 clear_bit(__I40E_VF_DISABLE, pf->state);
1695 clear_bit(__I40E_VFS_RELEASING, pf->state);
1696}
1697
#ifdef CONFIG_PCI_IOV
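
/**
 * i40e_alloc_vfs
 * @pf: pointer to the PF structure
 * @num_alloc_vfs: number of VFs to allocate
 *
 * allocate VF resources
 **/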
1706int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
1707{
1708 struct i40e_vf *vfs;
1709 int i, ret = 0;
1710
1711
1712 i40e_irq_dynamic_disable_icr0(pf);
1713
1714
1715 if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
1716 ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
1717 if (ret) {
1718 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
1719 pf->num_alloc_vfs = 0;
1720 goto err_iov;
1721 }
1722 }
1723
1724 vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
1725 if (!vfs) {
1726 ret = -ENOMEM;
1727 goto err_alloc;
1728 }
1729 pf->vf = vfs;
1730
1731
1732 for (i = 0; i < num_alloc_vfs; i++) {
1733 vfs[i].pf = pf;
1734 vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
1735 vfs[i].vf_id = i;
1736
1737
1738 set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
1739 vfs[i].spoofchk = true;
1740
1741 set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
1742
1743 }
1744 pf->num_alloc_vfs = num_alloc_vfs;
1745
1746
1747 i40e_reset_all_vfs(pf, false);
1748
1749 i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);
1750
1751err_alloc:
1752 if (ret)
1753 i40e_free_vfs(pf);
1754err_iov:
1755
1756 i40e_irq_dynamic_enable_icr0(pf);
1757 return ret;
1758}
1759
#endif
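
/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs
 **/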
1768static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
1769{
1770#ifdef CONFIG_PCI_IOV
1771 struct i40e_pf *pf = pci_get_drvdata(pdev);
1772 int pre_existing_vfs = pci_num_vf(pdev);
1773 int err = 0;
1774
1775 if (test_bit(__I40E_TESTING, pf->state)) {
1776 dev_warn(&pdev->dev,
1777 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
1778 err = -EPERM;
1779 goto err_out;
1780 }
1781
1782 if (pre_existing_vfs && pre_existing_vfs != num_vfs)
1783 i40e_free_vfs(pf);
1784 else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
1785 goto out;
1786
1787 if (num_vfs > pf->num_req_vfs) {
1788 dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
1789 num_vfs, pf->num_req_vfs);
1790 err = -EPERM;
1791 goto err_out;
1792 }
1793
1794 dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
1795 err = i40e_alloc_vfs(pf, num_vfs);
1796 if (err) {
1797 dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
1798 goto err_out;
1799 }
1800
1801out:
1802 return num_vfs;
1803
1804err_out:
1805 return err;
1806#endif
1807 return 0;
1808}
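
/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the
 * number of VFs in sysfs.
 **/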
1818int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
1819{
1820 struct i40e_pf *pf = pci_get_drvdata(pdev);
1821 int ret = 0;
1822
1823 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
1824 dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
1825 return -EAGAIN;
1826 }
1827
1828 if (num_vfs) {
1829 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
1830 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
1831 i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
1832 }
1833 ret = i40e_pci_sriov_enable(pdev, num_vfs);
1834 goto sriov_configure_out;
1835 }
1836
1837 if (!pci_vfs_assigned(pf->pdev)) {
1838 i40e_free_vfs(pf);
1839 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
1840 i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
1841 } else {
1842 dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
1843 ret = -EINVAL;
1844 goto sriov_configure_out;
1845 }
1846sriov_configure_out:
1847 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
1848 return ret;
1849}
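
/***********************virtual channel routines******************/

/**
 * i40e_vc_send_msg_to_vf
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to VF
 **/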
1863static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
1864 u32 v_retval, u8 *msg, u16 msglen)
1865{
1866 struct i40e_pf *pf;
1867 struct i40e_hw *hw;
1868 int abs_vf_id;
1869 i40e_status aq_ret;
1870
1871
1872 if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
1873 return -EINVAL;
1874
1875 pf = vf->pf;
1876 hw = &pf->hw;
1877 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1878
1879
1880 if (v_retval) {
1881 vf->num_invalid_msgs++;
1882 dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
1883 vf->vf_id, v_opcode, v_retval);
1884 if (vf->num_invalid_msgs >
1885 I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
1886 dev_err(&pf->pdev->dev,
1887 "Number of invalid messages exceeded for VF %d\n",
1888 vf->vf_id);
1889 dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
1890 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
1891 }
1892 } else {
1893 vf->num_valid_msgs++;
1894
1895 vf->num_invalid_msgs = 0;
1896 }
1897
1898 aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
1899 msg, msglen, NULL);
1900 if (aq_ret) {
1901 dev_info(&pf->pdev->dev,
1902 "Unable to send the message to VF %d aq_err %d\n",
1903 vf->vf_id, pf->hw.aq.asq_last_status);
1904 return -EIO;
1905 }
1906
1907 return 0;
1908}
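
/**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the VF info
 * @opcode: operation code
 * @retval: return value
 *
 * send resp msg to VF
 **/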
1918static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
1919 enum virtchnl_ops opcode,
1920 i40e_status retval)
1921{
1922 return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
1923}
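
/**
 * i40e_vc_get_version_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request the API version used by the PF
 **/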
1932static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
1933{
1934 struct virtchnl_version_info info = {
1935 VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
1936 };
1937
1938 vf->vf_ver = *(struct virtchnl_version_info *)msg;
1939
1940 if (VF_IS_V10(&vf->vf_ver))
1941 info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
1942 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
1943 I40E_SUCCESS, (u8 *)&info,
1944 sizeof(struct virtchnl_version_info));
1945}
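
/**
 * i40e_del_qch - delete all the additional VSIs created as a part of ADq
 * @vf: pointer to VF structure
 **/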
1951static void i40e_del_qch(struct i40e_vf *vf)
1952{
1953 struct i40e_pf *pf = vf->pf;
1954 int i;
1955
1956
1957
1958
1959 for (i = 1; i < vf->num_tc; i++) {
1960 if (vf->ch[i].vsi_idx) {
1961 i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
1962 vf->ch[i].vsi_idx = 0;
1963 vf->ch[i].vsi_id = 0;
1964 }
1965 }
1966}
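
/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request its resources
 **/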
1975static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
1976{
1977 struct virtchnl_vf_resource *vfres = NULL;
1978 struct i40e_pf *pf = vf->pf;
1979 i40e_status aq_ret = 0;
1980 struct i40e_vsi *vsi;
1981 int num_vsis = 1;
1982 size_t len = 0;
1983 int ret;
1984
1985 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
1986 aq_ret = I40E_ERR_PARAM;
1987 goto err;
1988 }
1989
1990 len = struct_size(vfres, vsi_res, num_vsis);
1991 vfres = kzalloc(len, GFP_KERNEL);
1992 if (!vfres) {
1993 aq_ret = I40E_ERR_NO_MEMORY;
1994 len = 0;
1995 goto err;
1996 }
1997 if (VF_IS_V11(&vf->vf_ver))
1998 vf->driver_caps = *(u32 *)msg;
1999 else
2000 vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
2001 VIRTCHNL_VF_OFFLOAD_RSS_REG |
2002 VIRTCHNL_VF_OFFLOAD_VLAN;
2003
2004 vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
2005 vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
2006 vsi = pf->vsi[vf->lan_vsi_idx];
2007 if (!vsi->info.pvid)
2008 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
2009
2010 if (i40e_vf_client_capable(pf, vf->vf_id) &&
2011 (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
2012 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
2013 set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
2014 } else {
2015 clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
2016 }
2017
2018 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2019 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
2020 } else {
2021 if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
2022 (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
2023 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
2024 else
2025 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
2026 }
2027
2028 if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
2029 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
2030 vfres->vf_cap_flags |=
2031 VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
2032 }
2033
2034 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
2035 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
2036
2037 if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
2038 (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
2039 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
2040
2041 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
2042 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
2043 dev_err(&pf->pdev->dev,
2044 "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
2045 vf->vf_id);
2046 aq_ret = I40E_ERR_PARAM;
2047 goto err;
2048 }
2049 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
2050 }
2051
2052 if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
2053 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
2054 vfres->vf_cap_flags |=
2055 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
2056 }
2057
2058 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
2059 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
2060
2061 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)
2062 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ;
2063
2064 vfres->num_vsis = num_vsis;
2065 vfres->num_queue_pairs = vf->num_queue_pairs;
2066 vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
2067 vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
2068 vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
2069
2070 if (vf->lan_vsi_idx) {
2071 vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
2072 vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
2073 vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
2074
2075 vfres->vsi_res[0].qset_handle
2076 = le16_to_cpu(vsi->info.qs_handle[0]);
2077 ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
2078 vf->default_lan_addr.addr);
2079 }
2080 set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
2081
2082err:
2083
2084 ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
2085 aq_ret, (u8 *)vfres, len);
2086
2087 kfree(vfres);
2088 return ret;
2089}
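
/**
 * i40e_vc_reset_vf_msg
 * @vf: pointer to the VF info
 *
 * called from the VF to reset itself; unlike other virtchnl messages the
 * PF driver does not send a response back to the VF
 **/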
2099static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
2100{
2101 if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
2102 i40e_reset_vf(vf, false);
2103}
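
/**
 * i40e_vc_config_promiscuous_mode_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure the promiscuous mode of VF VSIs
 **/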
2113static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
2114{
2115 struct virtchnl_promisc_info *info =
2116 (struct virtchnl_promisc_info *)msg;
2117 struct i40e_pf *pf = vf->pf;
2118 i40e_status aq_ret = 0;
2119 bool allmulti = false;
2120 bool alluni = false;
2121
2122 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2123 aq_ret = I40E_ERR_PARAM;
2124 goto err_out;
2125 }
2126 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2127 dev_err(&pf->pdev->dev,
2128 "Unprivileged VF %d is attempting to configure promiscuous mode\n",
2129 vf->vf_id);
2130
2131
2132
2133
2134 aq_ret = 0;
2135 goto err_out;
2136 }
2137
2138 if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) {
2139 aq_ret = I40E_ERR_PARAM;
2140 goto err_out;
2141 }
2142
2143 if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
2144 aq_ret = I40E_ERR_PARAM;
2145 goto err_out;
2146 }
2147
2148
2149 if (info->flags & FLAG_VF_MULTICAST_PROMISC)
2150 allmulti = true;
2151
2152 if (info->flags & FLAG_VF_UNICAST_PROMISC)
2153 alluni = true;
2154 aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti,
2155 alluni);
2156 if (aq_ret)
2157 goto err_out;
2158
2159 if (allmulti) {
2160 if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC,
2161 &vf->vf_states))
2162 dev_info(&pf->pdev->dev,
2163 "VF %d successfully set multicast promiscuous mode\n",
2164 vf->vf_id);
2165 } else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC,
2166 &vf->vf_states))
2167 dev_info(&pf->pdev->dev,
2168 "VF %d successfully unset multicast promiscuous mode\n",
2169 vf->vf_id);
2170
2171 if (alluni) {
2172 if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC,
2173 &vf->vf_states))
2174 dev_info(&pf->pdev->dev,
2175 "VF %d successfully set unicast promiscuous mode\n",
2176 vf->vf_id);
2177 } else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC,
2178 &vf->vf_states))
2179 dev_info(&pf->pdev->dev,
2180 "VF %d successfully unset unicast promiscuous mode\n",
2181 vf->vf_id);
2182
2183err_out:
2184
2185 return i40e_vc_send_resp_to_vf(vf,
2186 VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
2187 aq_ret);
2188}
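
/**
 * i40e_vc_config_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure the rx/tx queues
 **/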
2198static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
2199{
2200 struct virtchnl_vsi_queue_config_info *qci =
2201 (struct virtchnl_vsi_queue_config_info *)msg;
2202 struct virtchnl_queue_pair_info *qpi;
2203 struct i40e_pf *pf = vf->pf;
2204 u16 vsi_id, vsi_queue_id = 0;
2205 u16 num_qps_all = 0;
2206 i40e_status aq_ret = 0;
2207 int i, j = 0, idx = 0;
2208
2209 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2210 aq_ret = I40E_ERR_PARAM;
2211 goto error_param;
2212 }
2213
2214 if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
2215 aq_ret = I40E_ERR_PARAM;
2216 goto error_param;
2217 }
2218
2219 if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) {
2220 aq_ret = I40E_ERR_PARAM;
2221 goto error_param;
2222 }
2223
2224 if (vf->adq_enabled) {
2225 for (i = 0; i < I40E_MAX_VF_VSI; i++)
2226 num_qps_all += vf->ch[i].num_qps;
2227 if (num_qps_all != qci->num_queue_pairs) {
2228 aq_ret = I40E_ERR_PARAM;
2229 goto error_param;
2230 }
2231 }
2232
2233 vsi_id = qci->vsi_id;
2234
2235 for (i = 0; i < qci->num_queue_pairs; i++) {
2236 qpi = &qci->qpair[i];
2237
2238 if (!vf->adq_enabled) {
2239 if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
2240 qpi->txq.queue_id)) {
2241 aq_ret = I40E_ERR_PARAM;
2242 goto error_param;
2243 }
2244
2245 vsi_queue_id = qpi->txq.queue_id;
2246
2247 if (qpi->txq.vsi_id != qci->vsi_id ||
2248 qpi->rxq.vsi_id != qci->vsi_id ||
2249 qpi->rxq.queue_id != vsi_queue_id) {
2250 aq_ret = I40E_ERR_PARAM;
2251 goto error_param;
2252 }
2253 }
2254
2255 if (vf->adq_enabled) {
2256 if (idx >= ARRAY_SIZE(vf->ch)) {
2257 aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
2258 goto error_param;
2259 }
2260 vsi_id = vf->ch[idx].vsi_id;
2261 }
2262
2263 if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
2264 &qpi->rxq) ||
2265 i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
2266 &qpi->txq)) {
2267 aq_ret = I40E_ERR_PARAM;
2268 goto error_param;
2269 }
2270
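
		/* With ADq there can be up to 4 VSIs with up to 4 queues
		 * each. The VF does not know about these additional VSIs
		 * and only cares about its own queues; the PF maps the
		 * queues to the appropriate VSI based on the TC mapping.
		 */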
2276 if (vf->adq_enabled) {
2277 if (idx >= ARRAY_SIZE(vf->ch)) {
2278 aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
2279 goto error_param;
2280 }
2281 if (j == (vf->ch[idx].num_qps - 1)) {
2282 idx++;
2283 j = 0;
2284 vsi_queue_id = 0;
2285 } else {
2286 j++;
2287 vsi_queue_id++;
2288 }
2289 }
2290 }
2291
2292 if (!vf->adq_enabled) {
2293 pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
2294 qci->num_queue_pairs;
2295 } else {
2296 for (i = 0; i < vf->num_tc; i++)
2297 pf->vsi[vf->ch[i].vsi_idx]->num_queue_pairs =
2298 vf->ch[i].num_qps;
2299 }
2300
2301error_param:
2302
2303 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
2304 aq_ret);
2305}
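
/**
 * i40e_validate_queue_map - check queue map is valid
 * @vf: the VF structure pointer
 * @vsi_id: vsi id
 * @queuemap: Tx or Rx queue map
 *
 * check if Tx or Rx queue map is valid
 **/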
2315static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
2316 unsigned long queuemap)
2317{
2318 u16 vsi_queue_id, queue_id;
2319
2320 for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
2321 if (vf->adq_enabled) {
2322 vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
2323 queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
2324 } else {
2325 queue_id = vsi_queue_id;
2326 }
2327
2328 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id))
2329 return -EINVAL;
2330 }
2331
2332 return 0;
2333}
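
/**
 * i40e_vc_config_irq_map_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure the irq to queue map
 **/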
2343static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
2344{
2345 struct virtchnl_irq_map_info *irqmap_info =
2346 (struct virtchnl_irq_map_info *)msg;
2347 struct virtchnl_vector_map *map;
2348 u16 vsi_id;
2349 i40e_status aq_ret = 0;
2350 int i;
2351
2352 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2353 aq_ret = I40E_ERR_PARAM;
2354 goto error_param;
2355 }
2356
2357 if (irqmap_info->num_vectors >
2358 vf->pf->hw.func_caps.num_msix_vectors_vf) {
2359 aq_ret = I40E_ERR_PARAM;
2360 goto error_param;
2361 }
2362
2363 for (i = 0; i < irqmap_info->num_vectors; i++) {
2364 map = &irqmap_info->vecmap[i];
2365
2366 if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) ||
2367 !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) {
2368 aq_ret = I40E_ERR_PARAM;
2369 goto error_param;
2370 }
2371 vsi_id = map->vsi_id;
2372
2373 if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
2374 aq_ret = I40E_ERR_PARAM;
2375 goto error_param;
2376 }
2377
2378 if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
2379 aq_ret = I40E_ERR_PARAM;
2380 goto error_param;
2381 }
2382
2383 i40e_config_irq_link_list(vf, vsi_id, map);
2384 }
2385error_param:
2386
2387 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
2388 aq_ret);
2389}
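
/**
 * i40e_ctrl_vf_tx_rings
 * @vsi: the SRIOV VSI being configured
 * @q_map: bit map of the queues to be enabled
 * @enable: start or stop the queues
 **/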
2397static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2398 bool enable)
2399{
2400 struct i40e_pf *pf = vsi->back;
2401 int ret = 0;
2402 u16 q_id;
2403
2404 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
		ret = i40e_control_wait_tx_q(vsi->seid, pf,
					     vsi->base_queue + q_id,
					     false /* not an XDP ring */, enable);
2408 if (ret)
2409 break;
2410 }
2411 return ret;
2412}
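
/**
 * i40e_ctrl_vf_rx_rings
 * @vsi: the SRIOV VSI being configured
 * @q_map: bit map of the queues to be enabled
 * @enable: start or stop the queues
 **/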
2420static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2421 bool enable)
2422{
2423 struct i40e_pf *pf = vsi->back;
2424 int ret = 0;
2425 u16 q_id;
2426
2427 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2428 ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id,
2429 enable);
2430 if (ret)
2431 break;
2432 }
2433 return ret;
2434}
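
/**
 * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
 * @vqs: virtchnl_queue_select structure containing bitmaps to validate
 *
 * Returns true if validation was successful, else false.
 **/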
2442static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
2443{
2444 if ((!vqs->rx_queues && !vqs->tx_queues) ||
2445 vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) ||
2446 vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES))
2447 return false;
2448
2449 return true;
2450}
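
/**
 * i40e_vc_enable_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to enable all or specific queue(s)
 **/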
2459static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
2460{
2461 struct virtchnl_queue_select *vqs =
2462 (struct virtchnl_queue_select *)msg;
2463 struct i40e_pf *pf = vf->pf;
2464 i40e_status aq_ret = 0;
2465 int i;
2466
2467 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2468 aq_ret = I40E_ERR_PARAM;
2469 goto error_param;
2470 }
2471
2472 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2473 aq_ret = I40E_ERR_PARAM;
2474 goto error_param;
2475 }
2476
2477 if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2478 aq_ret = I40E_ERR_PARAM;
2479 goto error_param;
2480 }
2481
2482
2483 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2484 true)) {
2485 aq_ret = I40E_ERR_TIMEOUT;
2486 goto error_param;
2487 }
2488 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2489 true)) {
2490 aq_ret = I40E_ERR_TIMEOUT;
2491 goto error_param;
2492 }
2493
2494
2495 if (vf->adq_enabled) {
2496
2497 for (i = 1; i < vf->num_tc; i++) {
2498 if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx]))
2499 aq_ret = I40E_ERR_TIMEOUT;
2500 }
2501 }
2502
2503error_param:
2504
2505 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
2506 aq_ret);
2507}
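
/**
 * i40e_vc_disable_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to disable all or specific queue(s)
 **/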
2517static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
2518{
2519 struct virtchnl_queue_select *vqs =
2520 (struct virtchnl_queue_select *)msg;
2521 struct i40e_pf *pf = vf->pf;
2522 i40e_status aq_ret = 0;
2523
2524 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2525 aq_ret = I40E_ERR_PARAM;
2526 goto error_param;
2527 }
2528
2529 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2530 aq_ret = I40E_ERR_PARAM;
2531 goto error_param;
2532 }
2533
2534 if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2535 aq_ret = I40E_ERR_PARAM;
2536 goto error_param;
2537 }
2538
2539
2540 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2541 false)) {
2542 aq_ret = I40E_ERR_TIMEOUT;
2543 goto error_param;
2544 }
2545 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2546 false)) {
2547 aq_ret = I40E_ERR_TIMEOUT;
2548 goto error_param;
2549 }
2550error_param:
2551
2552 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
2553 aq_ret);
2554}
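
/**
 * i40e_vc_request_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * VFs get a default number of queues but can use this message to request a
 * different number. If the request is successful, PF will reset the VF and
 * return 0. If unsuccessful, PF will send a message informing the VF of the
 * number of available queues and return the result of sending that message.
 **/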
2566static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
2567{
2568 struct virtchnl_vf_res_request *vfres =
2569 (struct virtchnl_vf_res_request *)msg;
2570 u16 req_pairs = vfres->num_queue_pairs;
2571 u8 cur_pairs = vf->num_queue_pairs;
2572 struct i40e_pf *pf = vf->pf;
2573
2574 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
2575 return -EINVAL;
2576
2577 if (req_pairs > I40E_MAX_VF_QUEUES) {
2578 dev_err(&pf->pdev->dev,
2579 "VF %d tried to request more than %d queues.\n",
2580 vf->vf_id,
2581 I40E_MAX_VF_QUEUES);
2582 vfres->num_queue_pairs = I40E_MAX_VF_QUEUES;
2583 } else if (req_pairs - cur_pairs > pf->queues_left) {
2584 dev_warn(&pf->pdev->dev,
2585 "VF %d requested %d more queues, but only %d left.\n",
2586 vf->vf_id,
2587 req_pairs - cur_pairs,
2588 pf->queues_left);
2589 vfres->num_queue_pairs = pf->queues_left + cur_pairs;
2590 } else {
2591
2592 vf->num_req_queues = req_pairs;
2593 i40e_vc_notify_vf_reset(vf);
2594 i40e_reset_vf(vf, false);
2595 return 0;
2596 }
2597
2598 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
2599 (u8 *)vfres, sizeof(*vfres));
2600}
2601
2602
2603
2604
2605
2606
2607
2608
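/**
 * i40e_vc_get_stats_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Called from the VF to get the ethernet statistics of its VSI.
 **/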
2609static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
2610{
2611 struct virtchnl_queue_select *vqs =
2612 (struct virtchnl_queue_select *)msg;
2613 struct i40e_pf *pf = vf->pf;
2614 struct i40e_eth_stats stats;
2615 i40e_status aq_ret = 0;
2616 struct i40e_vsi *vsi;
2617
2618 memset(&stats, 0, sizeof(struct i40e_eth_stats));
2619
2620 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2621 aq_ret = I40E_ERR_PARAM;
2622 goto error_param;
2623 }
2624
2625 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2626 aq_ret = I40E_ERR_PARAM;
2627 goto error_param;
2628 }
2629
2630 vsi = pf->vsi[vf->lan_vsi_idx];
2631 if (!vsi) {
2632 aq_ret = I40E_ERR_PARAM;
2633 goto error_param;
2634 }
2635 i40e_update_eth_stats(vsi);
2636 stats = vsi->eth_stats;
2637
2638error_param:
2639
2640 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
2641 (u8 *)&stats, sizeof(stats));
2642}
2643
2644
2645
2646
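/* Filter limits applied to untrusted VFs: 16 + 1 + 1 MAC filters (room for
 * multicast entries plus the default unicast MAC and broadcast) and 16 VLANs.
 */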
2647#define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
2648#define I40E_VC_MAX_VLAN_PER_VF 16
2649
2650
2651
2652
2653
2654
2655
2656
2657
2658
2659
2660
2661
2662
2663
2664
2665
2666
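/**
 * i40e_check_vf_permission
 * @vf: pointer to the VF info
 * @al: MAC address list from virtchnl
 *
 * Check that the requested MAC addresses may be added by this VF. Returns
 * I40E_ERR_INVALID_MAC_ADDR for broadcast/zero addresses, -EPERM if an
 * untrusted VF tries to override an administratively set MAC or exceed its
 * filter limit, and 0 on success.
 **/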
2667static inline int i40e_check_vf_permission(struct i40e_vf *vf,
2668 struct virtchnl_ether_addr_list *al)
2669{
2670 struct i40e_pf *pf = vf->pf;
2671 struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
2672 int mac2add_cnt = 0;
2673 int i;
2674
2675 for (i = 0; i < al->num_elements; i++) {
2676 struct i40e_mac_filter *f;
2677 u8 *addr = al->list[i].addr;
2678
2679 if (is_broadcast_ether_addr(addr) ||
2680 is_zero_ether_addr(addr)) {
2681 dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
2682 addr);
2683 return I40E_ERR_INVALID_MAC_ADDR;
2684 }
2685
2686
2687
2688
2689
2690
2691
2692
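		/* If the host administrator set the VF MAC via ndo_set_vf_mac,
		 * an untrusted VF may not add any other unicast address; only
		 * multicast addresses and the assigned address are allowed.
		 */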
2693 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2694 !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
2695 !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
2696 dev_err(&pf->pdev->dev,
2697 "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
2698 return -EPERM;
2699 }
2700
2701
2702 f = i40e_find_mac(vsi, addr);
2703 if (!f)
2704 ++mac2add_cnt;
2705 }
2706
2707
2708
2709
2710
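	/* An untrusted VF may not program more than a limited number of
	 * MAC filters in total.
	 */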
2711 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2712 (i40e_count_filters(vsi) + mac2add_cnt) >
2713 I40E_VC_MAX_MAC_ADDR_PER_VF) {
2714 dev_err(&pf->pdev->dev,
2715 "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
2716 return -EPERM;
2717 }
2718 return 0;
2719}
2720
2721
2722
2723
2724
2725
2726
2727
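/**
 * i40e_vc_add_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Add MAC address filters requested by the VF.
 **/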
2728static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
2729{
2730 struct virtchnl_ether_addr_list *al =
2731 (struct virtchnl_ether_addr_list *)msg;
2732 struct i40e_pf *pf = vf->pf;
2733 struct i40e_vsi *vsi = NULL;
2734 i40e_status ret = 0;
2735 int i;
2736
2737 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2738 !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
2739 ret = I40E_ERR_PARAM;
2740 goto error_param;
2741 }
2742
2743 vsi = pf->vsi[vf->lan_vsi_idx];
2744
2745
2746
2747
2748 spin_lock_bh(&vsi->mac_filter_hash_lock);
2749
2750 ret = i40e_check_vf_permission(vf, al);
2751 if (ret) {
2752 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2753 goto error_param;
2754 }
2755
2756
2757 for (i = 0; i < al->num_elements; i++) {
2758 struct i40e_mac_filter *f;
2759
2760 f = i40e_find_mac(vsi, al->list[i].addr);
2761 if (!f) {
2762 f = i40e_add_mac_filter(vsi, al->list[i].addr);
2763
2764 if (!f) {
2765 dev_err(&pf->pdev->dev,
2766 "Unable to add MAC filter %pM for VF %d\n",
2767 al->list[i].addr, vf->vf_id);
2768 ret = I40E_ERR_PARAM;
2769 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2770 goto error_param;
2771 }
2772 if (is_valid_ether_addr(al->list[i].addr) &&
2773 is_zero_ether_addr(vf->default_lan_addr.addr))
2774 ether_addr_copy(vf->default_lan_addr.addr,
2775 al->list[i].addr);
2776 }
2777 }
2778 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2779
2780
2781 ret = i40e_sync_vsi_filters(vsi);
2782 if (ret)
2783 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
2784 vf->vf_id, ret);
2785
2786error_param:
2787
2788 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
2789 ret);
2790}
2791
2792
2793
2794
2795
2796
2797
2798
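/**
 * i40e_vc_del_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Remove MAC address filters requested by the VF.
 **/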
2799static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
2800{
2801 struct virtchnl_ether_addr_list *al =
2802 (struct virtchnl_ether_addr_list *)msg;
2803 bool was_unimac_deleted = false;
2804 struct i40e_pf *pf = vf->pf;
2805 struct i40e_vsi *vsi = NULL;
2806 i40e_status ret = 0;
2807 int i;
2808
2809 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2810 !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
2811 ret = I40E_ERR_PARAM;
2812 goto error_param;
2813 }
2814
2815 for (i = 0; i < al->num_elements; i++) {
2816 if (is_broadcast_ether_addr(al->list[i].addr) ||
2817 is_zero_ether_addr(al->list[i].addr)) {
2818 dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
2819 al->list[i].addr, vf->vf_id);
2820 ret = I40E_ERR_INVALID_MAC_ADDR;
2821 goto error_param;
2822 }
2823 if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr))
2824 was_unimac_deleted = true;
2825 }
2826 vsi = pf->vsi[vf->lan_vsi_idx];
2827
2828 spin_lock_bh(&vsi->mac_filter_hash_lock);
2829
2830 for (i = 0; i < al->num_elements; i++)
2831 if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
2832 ret = I40E_ERR_INVALID_MAC_ADDR;
2833 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2834 goto error_param;
2835 }
2836
2837 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2838
2839
2840 ret = i40e_sync_vsi_filters(vsi);
2841 if (ret)
2842 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
2843 vf->vf_id, ret);
2844
2845 if (vf->trusted && was_unimac_deleted) {
2846 struct i40e_mac_filter *f;
2847 struct hlist_node *h;
2848 u8 *macaddr = NULL;
2849 int bkt;
2850
2851
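		/* pick the last valid MAC left in the filter list as the new
		 * default LAN address
		 */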
2852 spin_lock_bh(&vsi->mac_filter_hash_lock);
2853 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2854 if (is_valid_ether_addr(f->macaddr))
2855 macaddr = f->macaddr;
2856 }
2857 if (macaddr)
2858 ether_addr_copy(vf->default_lan_addr.addr, macaddr);
2859 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2860 }
2861error_param:
2862
2863 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, ret);
2864}
2865
2866
2867
2868
2869
2870
2871
2872
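/**
 * i40e_vc_add_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Add guest VLAN id filters requested by the VF.
 **/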
2873static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
2874{
2875 struct virtchnl_vlan_filter_list *vfl =
2876 (struct virtchnl_vlan_filter_list *)msg;
2877 struct i40e_pf *pf = vf->pf;
2878 struct i40e_vsi *vsi = NULL;
2879 i40e_status aq_ret = 0;
2880 int i;
2881
2882 if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
2883 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2884 dev_err(&pf->pdev->dev,
2885 "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
2886 goto error_param;
2887 }
2888 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2889 !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
2890 aq_ret = I40E_ERR_PARAM;
2891 goto error_param;
2892 }
2893
2894 for (i = 0; i < vfl->num_elements; i++) {
2895 if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
2896 aq_ret = I40E_ERR_PARAM;
2897 dev_err(&pf->pdev->dev,
2898 "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
2899 goto error_param;
2900 }
2901 }
2902 vsi = pf->vsi[vf->lan_vsi_idx];
2903 if (vsi->info.pvid) {
2904 aq_ret = I40E_ERR_PARAM;
2905 goto error_param;
2906 }
2907
2908 i40e_vlan_stripping_enable(vsi);
2909 for (i = 0; i < vfl->num_elements; i++) {
2910
2911 int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
2912 if (!ret)
2913 vf->num_vlan++;
2914
2915 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
2916 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
2917 true,
2918 vfl->vlan_id[i],
2919 NULL);
2920 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
2921 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
2922 true,
2923 vfl->vlan_id[i],
2924 NULL);
2925
2926 if (ret)
2927 dev_err(&pf->pdev->dev,
2928 "Unable to add VLAN filter %d for VF %d, error %d\n",
2929 vfl->vlan_id[i], vf->vf_id, ret);
2930 }
2931
2932error_param:
2933
2934 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
2935}
2936
2937
2938
2939
2940
2941
2942
2943
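/**
 * i40e_vc_remove_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Remove programmed guest VLAN id filters.
 **/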
2944static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
2945{
2946 struct virtchnl_vlan_filter_list *vfl =
2947 (struct virtchnl_vlan_filter_list *)msg;
2948 struct i40e_pf *pf = vf->pf;
2949 struct i40e_vsi *vsi = NULL;
2950 i40e_status aq_ret = 0;
2951 int i;
2952
2953 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2954 !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
2955 aq_ret = I40E_ERR_PARAM;
2956 goto error_param;
2957 }
2958
2959 for (i = 0; i < vfl->num_elements; i++) {
2960 if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
2961 aq_ret = I40E_ERR_PARAM;
2962 goto error_param;
2963 }
2964 }
2965
2966 vsi = pf->vsi[vf->lan_vsi_idx];
2967 if (vsi->info.pvid) {
2968 if (vfl->num_elements > 1 || vfl->vlan_id[0])
2969 aq_ret = I40E_ERR_PARAM;
2970 goto error_param;
2971 }
2972
2973 for (i = 0; i < vfl->num_elements; i++) {
2974 i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
2975 vf->num_vlan--;
2976
2977 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
2978 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
2979 false,
2980 vfl->vlan_id[i],
2981 NULL);
2982 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
2983 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
2984 false,
2985 vfl->vlan_id[i],
2986 NULL);
2987 }
2988
2989error_param:
2990
2991 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
2992}
2993
2994
2995
2996
2997
2998
2999
3000
3001
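/**
 * i40e_vc_iwarp_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * Called for the iWARP opcodes; forwards the message to the client driver.
 **/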
3002static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
3003{
3004 struct i40e_pf *pf = vf->pf;
3005 int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
3006 i40e_status aq_ret = 0;
3007
3008 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3009 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
3010 aq_ret = I40E_ERR_PARAM;
3011 goto error_param;
3012 }
3013
3014 i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
3015 msg, msglen);
3016
3017error_param:
3018
3019 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP,
3020 aq_ret);
3021}
3022
3023
3024
3025
3026
3027
3028
3029
3030
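/**
 * i40e_vc_iwarp_qvmap_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @config: true to configure the iWARP queue vector map, false to release it
 **/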
3031static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
3032{
3033 struct virtchnl_iwarp_qvlist_info *qvlist_info =
3034 (struct virtchnl_iwarp_qvlist_info *)msg;
3035 i40e_status aq_ret = 0;
3036
3037 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3038 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
3039 aq_ret = I40E_ERR_PARAM;
3040 goto error_param;
3041 }
3042
3043 if (config) {
3044 if (i40e_config_iwarp_qvlist(vf, qvlist_info))
3045 aq_ret = I40E_ERR_PARAM;
3046 } else {
3047 i40e_release_iwarp_qvlist(vf);
3048 }
3049
3050error_param:
3051
3052 return i40e_vc_send_resp_to_vf(vf,
3053 config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
3054 VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
3055 aq_ret);
3056}
3057
3058
3059
3060
3061
3062
3063
3064
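/**
 * i40e_vc_config_rss_key
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Configure the VF's RSS key after validating its length.
 **/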
3065static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
3066{
3067 struct virtchnl_rss_key *vrk =
3068 (struct virtchnl_rss_key *)msg;
3069 struct i40e_pf *pf = vf->pf;
3070 struct i40e_vsi *vsi = NULL;
3071 i40e_status aq_ret = 0;
3072
3073 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3074 !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
3075 (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
3076 aq_ret = I40E_ERR_PARAM;
3077 goto err;
3078 }
3079
3080 vsi = pf->vsi[vf->lan_vsi_idx];
3081 aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
3082err:
3083
3084 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
3085 aq_ret);
3086}
3087
3088
3089
3090
3091
3092
3093
3094
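/**
 * i40e_vc_config_rss_lut
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Configure the VF's RSS lookup table after validating its size and entries.
 **/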
3095static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
3096{
3097 struct virtchnl_rss_lut *vrl =
3098 (struct virtchnl_rss_lut *)msg;
3099 struct i40e_pf *pf = vf->pf;
3100 struct i40e_vsi *vsi = NULL;
3101 i40e_status aq_ret = 0;
3102 u16 i;
3103
3104 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3105 !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
3106 (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
3107 aq_ret = I40E_ERR_PARAM;
3108 goto err;
3109 }
3110
3111 for (i = 0; i < vrl->lut_entries; i++)
3112 if (vrl->lut[i] >= vf->num_queue_pairs) {
3113 aq_ret = I40E_ERR_PARAM;
3114 goto err;
3115 }
3116
3117 vsi = pf->vsi[vf->lan_vsi_idx];
3118 aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
3119
3120err:
3121 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
3122 aq_ret);
3123}
3124
3125
3126
3127
3128
3129
3130
3131
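/**
 * i40e_vc_get_rss_hena
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return the default RSS HENA bits to the VF.
 **/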
3132static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
3133{
3134 struct virtchnl_rss_hena *vrh = NULL;
3135 struct i40e_pf *pf = vf->pf;
3136 i40e_status aq_ret = 0;
3137 int len = 0;
3138
3139 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3140 aq_ret = I40E_ERR_PARAM;
3141 goto err;
3142 }
3143 len = sizeof(struct virtchnl_rss_hena);
3144
3145 vrh = kzalloc(len, GFP_KERNEL);
3146 if (!vrh) {
3147 aq_ret = I40E_ERR_NO_MEMORY;
3148 len = 0;
3149 goto err;
3150 }
3151 vrh->hena = i40e_pf_get_default_rss_hena(pf);
3152err:
3153
3154 aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
3155 aq_ret, (u8 *)vrh, len);
3156 kfree(vrh);
3157 return aq_ret;
3158}
3159
3160
3161
3162
3163
3164
3165
3166
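/**
 * i40e_vc_set_rss_hena
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Set the RSS HENA bits for the VF.
 **/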
3167static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
3168{
3169 struct virtchnl_rss_hena *vrh =
3170 (struct virtchnl_rss_hena *)msg;
3171 struct i40e_pf *pf = vf->pf;
3172 struct i40e_hw *hw = &pf->hw;
3173 i40e_status aq_ret = 0;
3174
3175 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3176 aq_ret = I40E_ERR_PARAM;
3177 goto err;
3178 }
3179 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
3180 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
3181 (u32)(vrh->hena >> 32));
3182
3183
3184err:
3185 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
3186}
3187
3188
3189
3190
3191
3192
3193
3194
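/**
 * i40e_vc_enable_vlan_stripping
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Enable VLAN header stripping for the VF's LAN VSI.
 **/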
3195static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3196{
3197 i40e_status aq_ret = 0;
3198 struct i40e_vsi *vsi;
3199
3200 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3201 aq_ret = I40E_ERR_PARAM;
3202 goto err;
3203 }
3204
3205 vsi = vf->pf->vsi[vf->lan_vsi_idx];
3206 i40e_vlan_stripping_enable(vsi);
3207
3208
3209err:
3210 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
3211 aq_ret);
3212}
3213
3214
3215
3216
3217
3218
3219
3220
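/**
 * i40e_vc_disable_vlan_stripping
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Disable VLAN header stripping for the VF's LAN VSI.
 **/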
3221static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3222{
3223 i40e_status aq_ret = 0;
3224 struct i40e_vsi *vsi;
3225
3226 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3227 aq_ret = I40E_ERR_PARAM;
3228 goto err;
3229 }
3230
3231 vsi = vf->pf->vsi[vf->lan_vsi_idx];
3232 i40e_vlan_stripping_disable(vsi);
3233
3234
3235err:
3236 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
3237 aq_ret);
3238}
3239
3240
3241
3242
3243
3244
3245
3246
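/**
 * i40e_validate_cloud_filter
 * @vf: pointer to the VF info
 * @tc_filter: pointer to the virtchnl filter requested by the VF
 *
 * Validate a cloud filter programmed as a TC filter for ADq. Returns
 * I40E_SUCCESS if the filter is acceptable, otherwise I40E_ERR_CONFIG.
 **/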
3247static int i40e_validate_cloud_filter(struct i40e_vf *vf,
3248 struct virtchnl_filter *tc_filter)
3249{
3250 struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec;
3251 struct virtchnl_l4_spec data = tc_filter->data.tcp_spec;
3252 struct i40e_pf *pf = vf->pf;
3253 struct i40e_vsi *vsi = NULL;
3254 struct i40e_mac_filter *f;
3255 struct hlist_node *h;
3256 bool found = false;
3257 int bkt;
3258
3259 if (!tc_filter->action) {
3260 dev_info(&pf->pdev->dev,
3261 "VF %d: Currently ADq doesn't support Drop Action\n",
3262 vf->vf_id);
3263 goto err;
3264 }
3265
3266
3267 if (!tc_filter->action_meta ||
3268 tc_filter->action_meta > I40E_MAX_VF_VSI) {
3269 dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
3270 vf->vf_id, tc_filter->action_meta);
3271 goto err;
3272 }
3273
3274
3275
3276
3277
3278
3279
3280
3281
3282
3283 if (mask.dst_mac[0] && !mask.dst_ip[0]) {
3284 vsi = pf->vsi[vf->lan_vsi_idx];
3285 f = i40e_find_mac(vsi, data.dst_mac);
3286
3287 if (!f) {
3288 dev_info(&pf->pdev->dev,
3289 "Destination MAC %pM doesn't belong to VF %d\n",
3290 data.dst_mac, vf->vf_id);
3291 goto err;
3292 }
3293
3294 if (mask.vlan_id) {
3295 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f,
3296 hlist) {
3297 if (f->vlan == ntohs(data.vlan_id)) {
3298 found = true;
3299 break;
3300 }
3301 }
3302 if (!found) {
3303 dev_info(&pf->pdev->dev,
3304 "VF %d doesn't have any VLAN id %u\n",
3305 vf->vf_id, ntohs(data.vlan_id));
3306 goto err;
3307 }
3308 }
3309 } else {
3310
3311 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
3312 dev_err(&pf->pdev->dev,
3313 "VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n",
3314 vf->vf_id);
3315 return I40E_ERR_CONFIG;
3316 }
3317 }
3318
3319 if (mask.dst_mac[0] & data.dst_mac[0]) {
3320 if (is_broadcast_ether_addr(data.dst_mac) ||
3321 is_zero_ether_addr(data.dst_mac)) {
3322 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n",
3323 vf->vf_id, data.dst_mac);
3324 goto err;
3325 }
3326 }
3327
3328 if (mask.src_mac[0] & data.src_mac[0]) {
3329 if (is_broadcast_ether_addr(data.src_mac) ||
3330 is_zero_ether_addr(data.src_mac)) {
3331 dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n",
3332 vf->vf_id, data.src_mac);
3333 goto err;
3334 }
3335 }
3336
3337 if (mask.dst_port & data.dst_port) {
3338 if (!data.dst_port) {
3339 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
3340 vf->vf_id);
3341 goto err;
3342 }
3343 }
3344
3345 if (mask.src_port & data.src_port) {
3346 if (!data.src_port) {
3347 dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
3348 vf->vf_id);
3349 goto err;
3350 }
3351 }
3352
3353 if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW &&
3354 tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) {
3355 dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n",
3356 vf->vf_id);
3357 goto err;
3358 }
3359
3360 if (mask.vlan_id & data.vlan_id) {
3361 if (ntohs(data.vlan_id) > I40E_MAX_VLANID) {
3362 dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n",
3363 vf->vf_id);
3364 goto err;
3365 }
3366 }
3367
3368 return I40E_SUCCESS;
3369err:
3370 return I40E_ERR_CONFIG;
3371}
3372
3373
3374
3375
3376
3377
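/**
 * i40e_find_vsi_from_seid - search the VF's channel VSIs for the given seid
 * @vf: pointer to the VF info
 * @seid: seid of the VSI being searched for
 *
 * Returns the matching VSI, or NULL if none of the VF's TC VSIs match.
 **/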
3378static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid)
3379{
3380 struct i40e_pf *pf = vf->pf;
3381 struct i40e_vsi *vsi = NULL;
3382 int i;
3383
3384 for (i = 0; i < vf->num_tc ; i++) {
3385 vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id);
3386 if (vsi && vsi->seid == seid)
3387 return vsi;
3388 }
3389 return NULL;
3390}
3391
3392
3393
3394
3395
3396
3397
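/**
 * i40e_del_all_cloud_filters
 * @vf: pointer to the VF info
 *
 * Delete all cloud filters programmed on behalf of this VF.
 **/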
3398static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
3399{
3400 struct i40e_cloud_filter *cfilter = NULL;
3401 struct i40e_pf *pf = vf->pf;
3402 struct i40e_vsi *vsi = NULL;
3403 struct hlist_node *node;
3404 int ret;
3405
3406 hlist_for_each_entry_safe(cfilter, node,
3407 &vf->cloud_filter_list, cloud_node) {
3408 vsi = i40e_find_vsi_from_seid(vf, cfilter->seid);
3409
3410 if (!vsi) {
3411 dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n",
3412 vf->vf_id, cfilter->seid);
3413 continue;
3414 }
3415
3416 if (cfilter->dst_port)
3417 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
3418 false);
3419 else
3420 ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
3421 if (ret)
3422 dev_err(&pf->pdev->dev,
3423 "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
3424 vf->vf_id, i40e_stat_str(&pf->hw, ret),
3425 i40e_aq_str(&pf->hw,
3426 pf->hw.aq.asq_last_status));
3427
3428 hlist_del(&cfilter->cloud_node);
3429 kfree(cfilter);
3430 vf->num_cloud_filters--;
3431 }
3432}
3433
3434
3435
3436
3437
3438
3439
3440
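/**
 * i40e_vc_del_cloud_filter
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Delete a cloud filter that was programmed as a TC filter for ADq.
 **/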
3441static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
3442{
3443 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3444 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3445 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3446 struct i40e_cloud_filter cfilter, *cf = NULL;
3447 struct i40e_pf *pf = vf->pf;
3448 struct i40e_vsi *vsi = NULL;
3449 struct hlist_node *node;
3450 i40e_status aq_ret = 0;
3451 int i, ret;
3452
3453 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3454 aq_ret = I40E_ERR_PARAM;
3455 goto err;
3456 }
3457
3458 if (!vf->adq_enabled) {
3459 dev_info(&pf->pdev->dev,
3460 "VF %d: ADq not enabled, can't apply cloud filter\n",
3461 vf->vf_id);
3462 aq_ret = I40E_ERR_PARAM;
3463 goto err;
3464 }
3465
3466 if (i40e_validate_cloud_filter(vf, vcf)) {
3467 dev_info(&pf->pdev->dev,
3468 "VF %d: Invalid input, can't apply cloud filter\n",
3469 vf->vf_id);
3470 aq_ret = I40E_ERR_PARAM;
3471 goto err;
3472 }
3473
3474 memset(&cfilter, 0, sizeof(cfilter));
3475
3476 for (i = 0; i < ETH_ALEN; i++)
3477 cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3478
3479
3480 for (i = 0; i < ETH_ALEN; i++)
3481 cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3482
3483 cfilter.vlan_id = mask.vlan_id & tcf.vlan_id;
3484 cfilter.dst_port = mask.dst_port & tcf.dst_port;
3485 cfilter.src_port = mask.src_port & tcf.src_port;
3486
3487 switch (vcf->flow_type) {
3488 case VIRTCHNL_TCP_V4_FLOW:
3489 cfilter.n_proto = ETH_P_IP;
3490 if (mask.dst_ip[0] & tcf.dst_ip[0])
3491 memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
3492 ARRAY_SIZE(tcf.dst_ip));
3493 else if (mask.src_ip[0] & tcf.dst_ip[0])
3494 memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
3495 ARRAY_SIZE(tcf.dst_ip));
3496 break;
3497 case VIRTCHNL_TCP_V6_FLOW:
3498 cfilter.n_proto = ETH_P_IPV6;
3499 if (mask.dst_ip[3] & tcf.dst_ip[3])
3500 memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip,
3501 sizeof(cfilter.ip.v6.dst_ip6));
3502 if (mask.src_ip[3] & tcf.src_ip[3])
3503 memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip,
3504 sizeof(cfilter.ip.v6.src_ip6));
3505 break;
3506 default:
3507
3508
3509
3510 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3511 vf->vf_id);
3512 }
3513
3514
3515 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3516 cfilter.seid = vsi->seid;
3517 cfilter.flags = vcf->field_flags;
3518
3519
3520 if (tcf.dst_port)
3521 ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false);
3522 else
3523 ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
3524 if (ret) {
3525 dev_err(&pf->pdev->dev,
3526 "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
3527 vf->vf_id, i40e_stat_str(&pf->hw, ret),
3528 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3529 goto err;
3530 }
3531
3532 hlist_for_each_entry_safe(cf, node,
3533 &vf->cloud_filter_list, cloud_node) {
3534 if (cf->seid != cfilter.seid)
3535 continue;
3536 if (mask.dst_port)
3537 if (cfilter.dst_port != cf->dst_port)
3538 continue;
3539 if (mask.dst_mac[0])
3540 if (!ether_addr_equal(cf->src_mac, cfilter.src_mac))
3541 continue;
3542
3543 if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0])
3544 if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip,
3545 ARRAY_SIZE(tcf.dst_ip)))
3546 continue;
3547
3548 if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
3549 if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
3550 sizeof(cfilter.ip.v6.src_ip6)))
3551 continue;
3552 if (mask.vlan_id)
3553 if (cfilter.vlan_id != cf->vlan_id)
3554 continue;
3555
3556 hlist_del(&cf->cloud_node);
3557 kfree(cf);
3558 vf->num_cloud_filters--;
3559 }
3560
3561err:
3562 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER,
3563 aq_ret);
3564}
3565
3566
3567
3568
3569
3570
3571
3572
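/**
 * i40e_vc_add_cloud_filter
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Add a cloud filter programmed as a TC filter for ADq.
 **/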
3573static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
3574{
3575 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3576 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3577 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3578 struct i40e_cloud_filter *cfilter = NULL;
3579 struct i40e_pf *pf = vf->pf;
3580 struct i40e_vsi *vsi = NULL;
3581 i40e_status aq_ret = 0;
3582 int i, ret;
3583
3584 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3585 aq_ret = I40E_ERR_PARAM;
3586 goto err_out;
3587 }
3588
3589 if (!vf->adq_enabled) {
3590 dev_info(&pf->pdev->dev,
3591 "VF %d: ADq is not enabled, can't apply cloud filter\n",
3592 vf->vf_id);
3593 aq_ret = I40E_ERR_PARAM;
3594 goto err_out;
3595 }
3596
3597 if (i40e_validate_cloud_filter(vf, vcf)) {
3598 dev_info(&pf->pdev->dev,
3599 "VF %d: Invalid input/s, can't apply cloud filter\n",
3600 vf->vf_id);
3601 aq_ret = I40E_ERR_PARAM;
3602 goto err_out;
3603 }
3604
3605 cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
3606 if (!cfilter)
3607 return -ENOMEM;
3608
3609
3610 for (i = 0; i < ETH_ALEN; i++)
3611 cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3612
3613
3614 for (i = 0; i < ETH_ALEN; i++)
3615 cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3616
3617 cfilter->vlan_id = mask.vlan_id & tcf.vlan_id;
3618 cfilter->dst_port = mask.dst_port & tcf.dst_port;
3619 cfilter->src_port = mask.src_port & tcf.src_port;
3620
3621 switch (vcf->flow_type) {
3622 case VIRTCHNL_TCP_V4_FLOW:
3623 cfilter->n_proto = ETH_P_IP;
3624 if (mask.dst_ip[0] & tcf.dst_ip[0])
3625 memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
3626 ARRAY_SIZE(tcf.dst_ip));
3627 else if (mask.src_ip[0] & tcf.dst_ip[0])
3628 memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
3629 ARRAY_SIZE(tcf.dst_ip));
3630 break;
3631 case VIRTCHNL_TCP_V6_FLOW:
3632 cfilter->n_proto = ETH_P_IPV6;
3633 if (mask.dst_ip[3] & tcf.dst_ip[3])
3634 memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip,
3635 sizeof(cfilter->ip.v6.dst_ip6));
3636 if (mask.src_ip[3] & tcf.src_ip[3])
3637 memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip,
3638 sizeof(cfilter->ip.v6.src_ip6));
3639 break;
3640 default:
3641
3642
3643
3644 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3645 vf->vf_id);
3646 }
3647
3648
3649 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3650 cfilter->seid = vsi->seid;
3651 cfilter->flags = vcf->field_flags;
3652
3653
3654 if (tcf.dst_port)
3655 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
3656 else
3657 ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
3658 if (ret) {
3659 dev_err(&pf->pdev->dev,
3660 "VF %d: Failed to add cloud filter, err %s aq_err %s\n",
3661 vf->vf_id, i40e_stat_str(&pf->hw, ret),
3662 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3663 goto err_free;
3664 }
3665
3666 INIT_HLIST_NODE(&cfilter->cloud_node);
3667 hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
3668
3669 cfilter = NULL;
3670 vf->num_cloud_filters++;
3671err_free:
3672 kfree(cfilter);
3673err_out:
3674 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
3675 aq_ret);
3676}
3677
3678
3679
3680
3681
3682
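/**
 * i40e_vc_add_qch_msg - add queue channels and enable ADq for the VF
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Validates the requested TC/queue configuration and rate limits, records the
 * channel setup, then resets the VF so the new layout takes effect.
 **/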
3683static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
3684{
3685 struct virtchnl_tc_info *tci =
3686 (struct virtchnl_tc_info *)msg;
3687 struct i40e_pf *pf = vf->pf;
3688 struct i40e_link_status *ls = &pf->hw.phy.link_info;
3689 int i, adq_request_qps = 0;
3690 i40e_status aq_ret = 0;
3691 u64 speed = 0;
3692
3693 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3694 aq_ret = I40E_ERR_PARAM;
3695 goto err;
3696 }
3697
3698
3699 if (vf->spoofchk) {
3700 dev_err(&pf->pdev->dev,
3701 "Spoof check is ON, turn it OFF to enable ADq\n");
3702 aq_ret = I40E_ERR_PARAM;
3703 goto err;
3704 }
3705
3706 if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) {
3707 dev_err(&pf->pdev->dev,
3708 "VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
3709 vf->vf_id);
3710 aq_ret = I40E_ERR_PARAM;
3711 goto err;
3712 }
3713
3714
3715 if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) {
3716 dev_err(&pf->pdev->dev,
3717 "VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n",
3718 vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI);
3719 aq_ret = I40E_ERR_PARAM;
3720 goto err;
3721 }
3722
3723
3724 for (i = 0; i < tci->num_tc; i++)
3725 if (!tci->list[i].count ||
3726 tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
3727 dev_err(&pf->pdev->dev,
3728 "VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n",
3729 vf->vf_id, i, tci->list[i].count,
3730 I40E_DEFAULT_QUEUES_PER_VF);
3731 aq_ret = I40E_ERR_PARAM;
3732 goto err;
3733 }
3734
3735
3736 adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF;
3737
3738 if (pf->queues_left < adq_request_qps) {
3739 dev_err(&pf->pdev->dev,
3740 "No queues left to allocate to VF %d\n",
3741 vf->vf_id);
3742 aq_ret = I40E_ERR_PARAM;
3743 goto err;
3744 } else {
3745
3746
3747
3748
3749 vf->num_queue_pairs = I40E_MAX_VF_QUEUES;
3750 }
3751
3752
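	/* get the link speed in Mbps to validate the requested rate limits */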
3753 speed = i40e_vc_link_speed2mbps(ls->link_speed);
3754 if (speed == SPEED_UNKNOWN) {
3755 dev_err(&pf->pdev->dev,
3756 "Cannot detect link speed\n");
3757 aq_ret = I40E_ERR_PARAM;
3758 goto err;
3759 }
3760
3761
3762 vf->num_tc = tci->num_tc;
3763 for (i = 0; i < vf->num_tc; i++) {
3764 if (tci->list[i].max_tx_rate) {
3765 if (tci->list[i].max_tx_rate > speed) {
3766 dev_err(&pf->pdev->dev,
3767 "Invalid max tx rate %llu specified for VF %d.",
3768 tci->list[i].max_tx_rate,
3769 vf->vf_id);
3770 aq_ret = I40E_ERR_PARAM;
3771 goto err;
3772 } else {
3773 vf->ch[i].max_tx_rate =
3774 tci->list[i].max_tx_rate;
3775 }
3776 }
3777 vf->ch[i].num_qps = tci->list[i].count;
3778 }
3779
3780
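	/* set this flag only after all inputs have been validated */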
3781 vf->adq_enabled = true;
3782
3783
3784
3785
3786 vf->num_req_queues = 0;
3787
3788
3789 i40e_vc_notify_vf_reset(vf);
3790 i40e_reset_vf(vf, false);
3791
3792 return I40E_SUCCESS;
3793
3794
3795err:
3796 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS,
3797 aq_ret);
3798}
3799
3800
3801
3802
3803
3804
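/**
 * i40e_vc_del_qch_msg - delete queue channels and disable ADq for the VF
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 **/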
3805static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
3806{
3807 struct i40e_pf *pf = vf->pf;
3808 i40e_status aq_ret = 0;
3809
3810 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3811 aq_ret = I40E_ERR_PARAM;
3812 goto err;
3813 }
3814
3815 if (vf->adq_enabled) {
3816 i40e_del_all_cloud_filters(vf);
3817 i40e_del_qch(vf);
3818 vf->adq_enabled = false;
3819 vf->num_tc = 0;
3820 dev_info(&pf->pdev->dev,
3821 "Deleting Queue Channels and cloud filters for ADq on VF %d\n",
3822 vf->vf_id);
3823 } else {
3824 dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
3825 vf->vf_id);
3826 aq_ret = I40E_ERR_PARAM;
3827 }
3828
3829
3830 i40e_vc_notify_vf_reset(vf);
3831 i40e_reset_vf(vf, false);
3832
3833 return I40E_SUCCESS;
3834
3835err:
3836 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
3837 aq_ret);
3838}
3839
3840
3841
3842
3843
3844
3845
3846
3847
3848
3849
3850
3851
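/**
 * i40e_vc_process_vf_msg
 * @pf: pointer to the PF structure
 * @vf_id: source VF id
 * @v_opcode: operation code
 * @v_retval: unused return value code
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * Called from the common admin queue handler to validate a VF request and
 * dispatch it to the matching virtchnl opcode handler.
 **/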
3852int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
3853 u32 __always_unused v_retval, u8 *msg, u16 msglen)
3854{
3855 struct i40e_hw *hw = &pf->hw;
3856 int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
3857 struct i40e_vf *vf;
3858 int ret;
3859
3860 pf->vf_aq_requests++;
3861 if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs)
3862 return -EINVAL;
3863 vf = &(pf->vf[local_vf_id]);
3864
3865
3866 if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
3867 return I40E_ERR_PARAM;
3868
3869
3870 ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
3871
3872 if (ret) {
3873 i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
3874 dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
3875 local_vf_id, v_opcode, msglen);
3876 switch (ret) {
3877 case VIRTCHNL_STATUS_ERR_PARAM:
3878 return -EPERM;
3879 default:
3880 return -EINVAL;
3881 }
3882 }
3883
3884 switch (v_opcode) {
3885 case VIRTCHNL_OP_VERSION:
3886 ret = i40e_vc_get_version_msg(vf, msg);
3887 break;
3888 case VIRTCHNL_OP_GET_VF_RESOURCES:
3889 ret = i40e_vc_get_vf_resources_msg(vf, msg);
3890 i40e_vc_notify_vf_link_state(vf);
3891 break;
3892 case VIRTCHNL_OP_RESET_VF:
3893 i40e_vc_reset_vf_msg(vf);
3894 ret = 0;
3895 break;
3896 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
3897 ret = i40e_vc_config_promiscuous_mode_msg(vf, msg);
3898 break;
3899 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
3900 ret = i40e_vc_config_queues_msg(vf, msg);
3901 break;
3902 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
3903 ret = i40e_vc_config_irq_map_msg(vf, msg);
3904 break;
3905 case VIRTCHNL_OP_ENABLE_QUEUES:
3906 ret = i40e_vc_enable_queues_msg(vf, msg);
3907 i40e_vc_notify_vf_link_state(vf);
3908 break;
3909 case VIRTCHNL_OP_DISABLE_QUEUES:
3910 ret = i40e_vc_disable_queues_msg(vf, msg);
3911 break;
3912 case VIRTCHNL_OP_ADD_ETH_ADDR:
3913 ret = i40e_vc_add_mac_addr_msg(vf, msg);
3914 break;
3915 case VIRTCHNL_OP_DEL_ETH_ADDR:
3916 ret = i40e_vc_del_mac_addr_msg(vf, msg);
3917 break;
3918 case VIRTCHNL_OP_ADD_VLAN:
3919 ret = i40e_vc_add_vlan_msg(vf, msg);
3920 break;
3921 case VIRTCHNL_OP_DEL_VLAN:
3922 ret = i40e_vc_remove_vlan_msg(vf, msg);
3923 break;
3924 case VIRTCHNL_OP_GET_STATS:
3925 ret = i40e_vc_get_stats_msg(vf, msg);
3926 break;
3927 case VIRTCHNL_OP_IWARP:
3928 ret = i40e_vc_iwarp_msg(vf, msg, msglen);
3929 break;
3930 case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
3931 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, true);
3932 break;
3933 case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
3934 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, false);
3935 break;
3936 case VIRTCHNL_OP_CONFIG_RSS_KEY:
3937 ret = i40e_vc_config_rss_key(vf, msg);
3938 break;
3939 case VIRTCHNL_OP_CONFIG_RSS_LUT:
3940 ret = i40e_vc_config_rss_lut(vf, msg);
3941 break;
3942 case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
3943 ret = i40e_vc_get_rss_hena(vf, msg);
3944 break;
3945 case VIRTCHNL_OP_SET_RSS_HENA:
3946 ret = i40e_vc_set_rss_hena(vf, msg);
3947 break;
3948 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
3949 ret = i40e_vc_enable_vlan_stripping(vf, msg);
3950 break;
3951 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
3952 ret = i40e_vc_disable_vlan_stripping(vf, msg);
3953 break;
3954 case VIRTCHNL_OP_REQUEST_QUEUES:
3955 ret = i40e_vc_request_queues_msg(vf, msg);
3956 break;
3957 case VIRTCHNL_OP_ENABLE_CHANNELS:
3958 ret = i40e_vc_add_qch_msg(vf, msg);
3959 break;
3960 case VIRTCHNL_OP_DISABLE_CHANNELS:
3961 ret = i40e_vc_del_qch_msg(vf, msg);
3962 break;
3963 case VIRTCHNL_OP_ADD_CLOUD_FILTER:
3964 ret = i40e_vc_add_cloud_filter(vf, msg);
3965 break;
3966 case VIRTCHNL_OP_DEL_CLOUD_FILTER:
3967 ret = i40e_vc_del_cloud_filter(vf, msg);
3968 break;
3969 case VIRTCHNL_OP_UNKNOWN:
3970 default:
3971 dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
3972 v_opcode, local_vf_id);
3973 ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
3974 I40E_ERR_NOT_IMPLEMENTED);
3975 break;
3976 }
3977
3978 return ret;
3979}
3980
3981
3982
3983
3984
3985
3986
3987
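/**
 * i40e_vc_process_vflr_event
 * @pf: pointer to the PF structure
 *
 * Called when a VFLR event is pending to identify which VF(s) triggered the
 * VFLR and reset them.
 **/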
3988int i40e_vc_process_vflr_event(struct i40e_pf *pf)
3989{
3990 struct i40e_hw *hw = &pf->hw;
3991 u32 reg, reg_idx, bit_idx;
3992 struct i40e_vf *vf;
3993 int vf_id;
3994
3995 if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
3996 return 0;
3997
3998
3999
4000
4001
4002
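	/* Re-enable the VFLR interrupt cause here, before looking for which
	 * VF caused it, so that a VFLR that fires while we are handling this
	 * one is not lost.
	 */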
4003 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4004 reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
4005 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4006 i40e_flush(hw);
4007
4008 clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
4009 for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
4010 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
4011 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
4012
4013 vf = &pf->vf[vf_id];
4014 reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
4015 if (reg & BIT(bit_idx))
4016
4017 i40e_reset_vf(vf, true);
4018 }
4019
4020 return 0;
4021}
4022
4023
4024
4025
4026
4027
4028
4029
4030
4031
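/**
 * i40e_validate_vf
 * @pf: the physical function
 * @vf_id: VF identifier
 *
 * Check that the VF id is valid and that the VF's VSI exists.
 *
 * Returns 0 on success, negative on failure.
 **/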
4032static int i40e_validate_vf(struct i40e_pf *pf, int vf_id)
4033{
4034 struct i40e_vsi *vsi;
4035 struct i40e_vf *vf;
4036 int ret = 0;
4037
4038 if (vf_id >= pf->num_alloc_vfs) {
4039 dev_err(&pf->pdev->dev,
4040 "Invalid VF Identifier %d\n", vf_id);
4041 ret = -EINVAL;
4042 goto err_out;
4043 }
4044 vf = &pf->vf[vf_id];
4045 vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
4046 if (!vsi)
4047 ret = -EINVAL;
4048err_out:
4049 return ret;
4050}
4051
4052
4053
4054
4055
4056
4057
4058
4059
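/**
 * i40e_ndo_set_vf_mac
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @mac: MAC address
 *
 * Program the administratively set MAC address for the VF.
 **/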
4060int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
4061{
4062 struct i40e_netdev_priv *np = netdev_priv(netdev);
4063 struct i40e_vsi *vsi = np->vsi;
4064 struct i40e_pf *pf = vsi->back;
4065 struct i40e_mac_filter *f;
4066 struct i40e_vf *vf;
4067 int ret = 0;
4068 struct hlist_node *h;
4069 int bkt;
4070 u8 i;
4071
4072 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4073 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4074 return -EAGAIN;
4075 }
4076
4077
4078 ret = i40e_validate_vf(pf, vf_id);
4079 if (ret)
4080 goto error_param;
4081
4082 vf = &pf->vf[vf_id];
4083
4084
4085
4086
4087
4088
4089
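	/* If the VF is resetting, wait up to 300 ms (15 * 20 ms) for it to
	 * finish before changing the MAC.
	 */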
4090 for (i = 0; i < 15; i++) {
4091 if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
4092 break;
4093 msleep(20);
4094 }
4095 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4096 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4097 vf_id);
4098 ret = -EAGAIN;
4099 goto error_param;
4100 }
4101 vsi = pf->vsi[vf->lan_vsi_idx];
4102
4103 if (is_multicast_ether_addr(mac)) {
4104 dev_err(&pf->pdev->dev,
4105 "Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
4106 ret = -EINVAL;
4107 goto error_param;
4108 }
4109
4110
4111
4112
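	/* Take the lock once; the filter add/delete helpers called below
	 * require mac_filter_hash_lock to be held.
	 */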
4113 spin_lock_bh(&vsi->mac_filter_hash_lock);
4114
4115
4116 if (!is_zero_ether_addr(vf->default_lan_addr.addr))
4117 i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
4118
4119
4120
4121
4122 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
4123 __i40e_del_filter(vsi, f);
4124
4125 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4126
4127
4128 if (i40e_sync_vsi_filters(vsi)) {
4129 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
4130 ret = -EIO;
4131 goto error_param;
4132 }
4133 ether_addr_copy(vf->default_lan_addr.addr, mac);
4134
4135 if (is_zero_ether_addr(mac)) {
4136 vf->pf_set_mac = false;
4137 dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
4138 } else {
4139 vf->pf_set_mac = true;
4140 dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
4141 mac, vf_id);
4142 }
4143
4144
4145
4146
4147 i40e_vc_disable_vf(vf);
4148 dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
4149
4150error_param:
4151 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4152 return ret;
4153}
4154
4155
4156
4157
4158
4159
4160
4161
4162
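/**
 * i40e_vsi_has_vlans - true if the VSI has configured VLANs
 * @vsi: pointer to the VSI
 *
 * Check if a VSI has configured any VLANs. Returns false if a port VLAN is
 * set or no VLANs are configured. Do not call while holding the
 * mac_filter_hash_lock; this function takes it itself.
 **/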
4163static bool i40e_vsi_has_vlans(struct i40e_vsi *vsi)
4164{
4165 bool have_vlans;
4166
4167
4168
4169
4170 if (vsi->info.pvid)
4171 return false;
4172
4173
4174
4175
4176 spin_lock_bh(&vsi->mac_filter_hash_lock);
4177 have_vlans = i40e_is_vsi_in_vlan(vsi);
4178 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4179
4180 return have_vlans;
4181}
4182
4183
4184
4185
4186
4187
4188
4189
4190
4191
4192
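/**
 * i40e_ndo_set_vf_port_vlan
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @vlan_id: VLAN id to set
 * @qos: priority setting
 * @vlan_proto: VLAN protocol
 *
 * Program a port VLAN id and/or QoS priority for the VF.
 **/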
4193int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
4194 u16 vlan_id, u8 qos, __be16 vlan_proto)
4195{
4196 u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
4197 struct i40e_netdev_priv *np = netdev_priv(netdev);
4198 bool allmulti = false, alluni = false;
4199 struct i40e_pf *pf = np->vsi->back;
4200 struct i40e_vsi *vsi;
4201 struct i40e_vf *vf;
4202 int ret = 0;
4203
4204 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4205 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4206 return -EAGAIN;
4207 }
4208
4209
4210 ret = i40e_validate_vf(pf, vf_id);
4211 if (ret)
4212 goto error_pvid;
4213
4214 if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
4215 dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
4216 ret = -EINVAL;
4217 goto error_pvid;
4218 }
4219
4220 if (vlan_proto != htons(ETH_P_8021Q)) {
4221 dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
4222 ret = -EPROTONOSUPPORT;
4223 goto error_pvid;
4224 }
4225
4226 vf = &pf->vf[vf_id];
4227 vsi = pf->vsi[vf->lan_vsi_idx];
4228 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4229 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4230 vf_id);
4231 ret = -EAGAIN;
4232 goto error_pvid;
4233 }
4234
4235 if (le16_to_cpu(vsi->info.pvid) == vlanprio)
4236
4237 goto error_pvid;
4238
4239 if (i40e_vsi_has_vlans(vsi)) {
4240 dev_err(&pf->pdev->dev,
4241 "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
4242 vf_id);
4243
4244
4245
4246
4247 i40e_vc_disable_vf(vf);
4248
4249 vsi = pf->vsi[vf->lan_vsi_idx];
4250 }
4251
4252
4253 spin_lock_bh(&vsi->mac_filter_hash_lock);
4254
4255
4256
4257
4258
4259
4260
4261
4262
4263 if ((!(vlan_id || qos) ||
4264 vlanprio != le16_to_cpu(vsi->info.pvid)) &&
4265 vsi->info.pvid) {
4266 ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
4267 if (ret) {
4268 dev_info(&vsi->back->pdev->dev,
4269 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4270 vsi->back->hw.aq.asq_last_status);
4271 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4272 goto error_pvid;
4273 }
4274 }
4275
4276 if (vsi->info.pvid) {
4277
4278 i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
4279 VLAN_VID_MASK));
4280 }
4281
4282 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4283
4284
4285 ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id,
4286 allmulti, alluni);
4287 if (ret) {
4288 dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
4289 goto error_pvid;
4290 }
4291
4292 if (vlan_id || qos)
4293 ret = i40e_vsi_add_pvid(vsi, vlanprio);
4294 else
4295 i40e_vsi_remove_pvid(vsi);
4296 spin_lock_bh(&vsi->mac_filter_hash_lock);
4297
4298 if (vlan_id) {
4299 dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
4300 vlan_id, qos, vf_id);
4301
4302
4303 ret = i40e_add_vlan_all_mac(vsi, vlan_id);
4304 if (ret) {
4305 dev_info(&vsi->back->pdev->dev,
4306 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4307 vsi->back->hw.aq.asq_last_status);
4308 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4309 goto error_pvid;
4310 }
4311
4312
4313 i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
4314 }
4315
4316 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4317
4318 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
4319 alluni = true;
4320
4321 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
4322 allmulti = true;
4323
4324
4325 i40e_service_event_schedule(vsi->back);
4326
4327 if (ret) {
4328 dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
4329 goto error_pvid;
4330 }
4331
4332
4333
4334
4335 vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
4336
4337 ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
4338 if (ret) {
4339 dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n");
4340 goto error_pvid;
4341 }
4342
4343 ret = 0;
4344
4345error_pvid:
4346 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4347 return ret;
4348}
4349
4350
4351
4352
4353
4354
4355
4356
4357
4358
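/**
 * i40e_ndo_set_vf_bw
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @min_tx_rate: minimum Tx rate (not supported, must be 0)
 * @max_tx_rate: maximum Tx rate
 *
 * Configure the maximum VF Tx rate.
 **/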
4359int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
4360 int max_tx_rate)
4361{
4362 struct i40e_netdev_priv *np = netdev_priv(netdev);
4363 struct i40e_pf *pf = np->vsi->back;
4364 struct i40e_vsi *vsi;
4365 struct i40e_vf *vf;
4366 int ret = 0;
4367
4368 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4369 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4370 return -EAGAIN;
4371 }
4372
4373
4374 ret = i40e_validate_vf(pf, vf_id);
4375 if (ret)
4376 goto error;
4377
4378 if (min_tx_rate) {
4379 dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
4380 min_tx_rate, vf_id);
4381 ret = -EINVAL;
4382 goto error;
4383 }
4384
4385 vf = &pf->vf[vf_id];
4386 vsi = pf->vsi[vf->lan_vsi_idx];
4387 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4388 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4389 vf_id);
4390 ret = -EAGAIN;
4391 goto error;
4392 }
4393
4394 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
4395 if (ret)
4396 goto error;
4397
4398 vf->tx_rate = max_tx_rate;
4399error:
4400 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4401 return ret;
4402}
4403
4404
4405
4406
4407
4408
4409
4410
4411
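/**
 * i40e_ndo_get_vf_config
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ivi: VF configuration structure to fill
 *
 * Return the VF configuration (MAC, rates, port VLAN, link state, flags).
 **/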
4412int i40e_ndo_get_vf_config(struct net_device *netdev,
4413 int vf_id, struct ifla_vf_info *ivi)
4414{
4415 struct i40e_netdev_priv *np = netdev_priv(netdev);
4416 struct i40e_vsi *vsi = np->vsi;
4417 struct i40e_pf *pf = vsi->back;
4418 struct i40e_vf *vf;
4419 int ret = 0;
4420
4421 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4422 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4423 return -EAGAIN;
4424 }
4425
4426
4427 ret = i40e_validate_vf(pf, vf_id);
4428 if (ret)
4429 goto error_param;
4430
4431 vf = &pf->vf[vf_id];
4432
4433 vsi = pf->vsi[vf->lan_vsi_idx];
4434 if (!vsi) {
4435 ret = -ENOENT;
4436 goto error_param;
4437 }
4438
4439 ivi->vf = vf_id;
4440
4441 ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
4442
4443 ivi->max_tx_rate = vf->tx_rate;
4444 ivi->min_tx_rate = 0;
4445 ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
4446 ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
4447 I40E_VLAN_PRIORITY_SHIFT;
4448 if (vf->link_forced == false)
4449 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
4450 else if (vf->link_up == true)
4451 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
4452 else
4453 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
4454 ivi->spoofchk = vf->spoofchk;
4455 ivi->trusted = vf->trusted;
4456 ret = 0;
4457
4458error_param:
4459 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4460 return ret;
4461}
4462
4463
4464
4465
4466
4467
4468
4469
4470
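/**
 * i40e_ndo_set_vf_link_state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link: required link state
 *
 * Force the link state of a VF up or down, or return it to automatic,
 * and notify the VF of the change.
 **/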
4471int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
4472{
4473 struct i40e_netdev_priv *np = netdev_priv(netdev);
4474 struct i40e_pf *pf = np->vsi->back;
4475 struct i40e_link_status *ls = &pf->hw.phy.link_info;
4476 struct virtchnl_pf_event pfe;
4477 struct i40e_hw *hw = &pf->hw;
4478 struct i40e_vf *vf;
4479 int abs_vf_id;
4480 int ret = 0;
4481
4482 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4483 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4484 return -EAGAIN;
4485 }
4486
4487
4488 if (vf_id >= pf->num_alloc_vfs) {
4489 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4490 ret = -EINVAL;
4491 goto error_out;
4492 }
4493
4494 vf = &pf->vf[vf_id];
4495 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
4496
4497 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
4498 pfe.severity = PF_EVENT_SEVERITY_INFO;
4499
4500 switch (link) {
4501 case IFLA_VF_LINK_STATE_AUTO:
4502 vf->link_forced = false;
4503 i40e_set_vf_link_state(vf, &pfe, ls);
4504 break;
4505 case IFLA_VF_LINK_STATE_ENABLE:
4506 vf->link_forced = true;
4507 vf->link_up = true;
4508 i40e_set_vf_link_state(vf, &pfe, ls);
4509 break;
4510 case IFLA_VF_LINK_STATE_DISABLE:
4511 vf->link_forced = true;
4512 vf->link_up = false;
4513 i40e_set_vf_link_state(vf, &pfe, ls);
4514 break;
4515 default:
4516 ret = -EINVAL;
4517 goto error_out;
4518 }
4519
4520 i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
4521 0, (u8 *)&pfe, sizeof(pfe), NULL);
4522
4523error_out:
4524 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4525 return ret;
4526}
4527
4528
4529
4530
4531
4532
4533
4534
4535
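/**
 * i40e_ndo_set_vf_spoofchk
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @enable: flag to enable or disable the feature
 *
 * Enable or disable MAC/VLAN anti-spoof checking for the VF's VSI.
 **/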
4536int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
4537{
4538 struct i40e_netdev_priv *np = netdev_priv(netdev);
4539 struct i40e_vsi *vsi = np->vsi;
4540 struct i40e_pf *pf = vsi->back;
4541 struct i40e_vsi_context ctxt;
4542 struct i40e_hw *hw = &pf->hw;
4543 struct i40e_vf *vf;
4544 int ret = 0;
4545
4546 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4547 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4548 return -EAGAIN;
4549 }
4550
4551
4552 if (vf_id >= pf->num_alloc_vfs) {
4553 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4554 ret = -EINVAL;
4555 goto out;
4556 }
4557
4558 vf = &(pf->vf[vf_id]);
4559 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4560 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4561 vf_id);
4562 ret = -EAGAIN;
4563 goto out;
4564 }
4565
4566 if (enable == vf->spoofchk)
4567 goto out;
4568
4569 vf->spoofchk = enable;
4570 memset(&ctxt, 0, sizeof(ctxt));
4571 ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
4572 ctxt.pf_num = pf->hw.pf_id;
4573 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
4574 if (enable)
4575 ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
4576 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
4577 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4578 if (ret) {
4579 dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
4580 ret);
4581 ret = -EIO;
4582 }
4583out:
4584 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4585 return ret;
4586}
4587
4588
4589
4590
4591
4592
4593
4594
4595
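/**
 * i40e_ndo_set_vf_trust
 * @netdev: network interface device structure of the PF
 * @vf_id: VF identifier
 * @setting: trust setting
 *
 * Enable or disable the trusted attribute of a VF; clearing trust also
 * removes any ADq cloud filters the VF had programmed.
 **/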
4596int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
4597{
4598 struct i40e_netdev_priv *np = netdev_priv(netdev);
4599 struct i40e_pf *pf = np->vsi->back;
4600 struct i40e_vf *vf;
4601 int ret = 0;
4602
4603 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4604 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4605 return -EAGAIN;
4606 }
4607
4608
4609 if (vf_id >= pf->num_alloc_vfs) {
4610 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4611 ret = -EINVAL;
4612 goto out;
4613 }
4614
4615 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
4616 dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
4617 ret = -EINVAL;
4618 goto out;
4619 }
4620
4621 vf = &pf->vf[vf_id];
4622
4623 if (setting == vf->trusted)
4624 goto out;
4625
4626 vf->trusted = setting;
4627 i40e_vc_disable_vf(vf);
4628 dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
4629 vf_id, setting ? "" : "un");
4630
4631 if (vf->adq_enabled) {
4632 if (!vf->trusted) {
4633 dev_info(&pf->pdev->dev,
4634 "VF %u no longer Trusted, deleting all cloud filters\n",
4635 vf_id);
4636 i40e_del_all_cloud_filters(vf);
4637 }
4638 }
4639
4640out:
4641 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4642 return ret;
4643}
4644
4645
4646
4647
4648
4649
4650
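/**
 * i40e_get_vf_stats - populate statistics for a VF
 * @netdev: the netdev of the PF
 * @vf_id: VF identifier
 * @vf_stats: pointer to the OS structure to be filled
 *
 * Fill ifla_vf_stats from the VF's VSI ethernet statistics.
 **/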
4651int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
4652 struct ifla_vf_stats *vf_stats)
4653{
4654 struct i40e_netdev_priv *np = netdev_priv(netdev);
4655 struct i40e_pf *pf = np->vsi->back;
4656 struct i40e_eth_stats *stats;
4657 struct i40e_vsi *vsi;
4658 struct i40e_vf *vf;
4659
4660
4661 if (i40e_validate_vf(pf, vf_id))
4662 return -EINVAL;
4663
4664 vf = &pf->vf[vf_id];
4665 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4666 dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
4667 return -EBUSY;
4668 }
4669
4670 vsi = pf->vsi[vf->lan_vsi_idx];
4671 if (!vsi)
4672 return -EINVAL;
4673
4674 i40e_update_eth_stats(vsi);
4675 stats = &vsi->eth_stats;
4676
4677 memset(vf_stats, 0, sizeof(*vf_stats));
4678
4679 vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
4680 stats->rx_multicast;
4681 vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
4682 stats->tx_multicast;
4683 vf_stats->rx_bytes = stats->rx_bytes;
4684 vf_stats->tx_bytes = stats->tx_bytes;
4685 vf_stats->broadcast = stats->rx_broadcast;
4686 vf_stats->multicast = stats->rx_multicast;
4687 vf_stats->rx_dropped = stats->rx_discards;
4688 vf_stats->tx_dropped = stats->tx_discards;
4689
4690 return 0;
4691}
4692