#include "i40e.h"
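
/*********************notification routines***********************/

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/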
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
				 enum virtchnl_ops v_opcode,
				 i40e_status v_retval, u8 *msg,
				 u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
		int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
		/* Not all vfs are enabled so skip the ones that are not */
		if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
		    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
	}
}
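
/**
 * i40e_vc_notify_vf_link_state
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 **/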
static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;
	if (vf->link_forced) {
		pfe.event_data.link_event.link_status = vf->link_up;
		pfe.event_data.link_event.link_speed =
			(vf->link_up ? I40E_LINK_SPEED_40GB : 0);
	} else {
		pfe.event_data.link_event.link_status =
			ls->link_info & I40E_AQ_LINK_UP;
		pfe.event_data.link_event.link_speed =
			(enum virtchnl_link_speed)ls->link_speed;
	}
	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);
}
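
/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the PF structure
 *
 * send a link status message to all VFs on a given PF
 **/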
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++)
		i40e_vc_notify_vf_link_state(&pf->vf[i]);
}
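
/**
 * i40e_vc_notify_reset
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/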
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
	struct virtchnl_pf_event pfe;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
			     (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}
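
/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the VF structure
 *
 * indicate a pending reset to the given VF
 **/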
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	int abs_vf_id;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return;

	/* verify if the VF is in either init or active before proceeding */
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
	    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		return;

	abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe,
			       sizeof(struct virtchnl_pf_event), NULL);
}
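
/**
 * i40e_vc_disable_vf
 * @vf: pointer to the VF info
 *
 * Disable the VF through a SW reset.
 **/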
static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
{
	int i;

	i40e_vc_notify_vf_reset(vf);

	/* We want to ensure that an actual reset occurs initiated after this
	 * function was called. However, we do not want to wait forever, so
	 * we'll give a reasonable time and print a message if we failed to
	 * ensure a reset.
	 */
	for (i = 0; i < 20; i++) {
		if (i40e_reset_vf(vf, false))
			return;
		usleep_range(10000, 20000);
	}

	dev_warn(&vf->pf->pdev->dev,
		 "Failed to initiate reset for VF %d after 200 milliseconds\n",
		 vf->vf_id);
}
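
/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI id
 *
 * check for the valid VSI id
 **/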
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (vsi->vf_id == vf->vf_id));
}
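
/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/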
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
					    u8 qid)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (qid < vsi->alloc_queue_pairs));
}
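
/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the VF info
 * @vector_id: VF relative vector id
 *
 * check for the valid vector id
 **/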
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}
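
/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue id
 *
 * return PF relative queue id
 **/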
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
				   u8 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

	if (!vsi)
		return pf_queue_id;

	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}
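
/**
 * i40e_config_irq_link_list
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as given by the FW
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/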
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
				      struct virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id, size;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
		     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
		     (vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
	tempmap = vecmap->rxq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id));
	}

	tempmap = vecmap->txq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id + 1));
	}

	size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
	next_q = find_first_bit(&linklistmap, size);
	if (unlikely(next_q == size))
		goto irq_list_done;

	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	while (next_q < size) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap, size, next_q + 1);
		if (next_q < size) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id,
							      vsi_queue_id);
		} else {
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is same */
		reg = (vector_id) |
		    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		    BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

	/* if the vf is running in polling mode and using interrupt zero,
	 * need to disable auto-mask on enabling zero interrupt for VFs.
	 */
	if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
	    (vector_id == 0)) {
		reg = rd32(hw, I40E_GLINT_CTL);
		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
			wr32(hw, I40E_GLINT_CTL, reg);
		}
	}

irq_list_done:
	i40e_flush(hw);
}
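
/**
 * i40e_release_iwarp_qvlist
 * @vf: pointer to the VF info
 *
 * Release the iwarp queue-vector list previously configured for this VF.
 **/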
static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
	u32 msix_vf;
	u32 i;

	if (!vf->qvlist_info)
		return;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		struct virtchnl_iwarp_qv_info *qv_info;
		u32 next_q_index, next_q_type;
		struct i40e_hw *hw = &pf->hw;
		u32 v_idx, reg_idx, reg;

		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;
		v_idx = qv_info->v_idx;
		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			/* Figure out the queue after CEQ and make that the
			 * first queue.
			 */
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
			next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
			next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (next_q_index &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (next_q_type <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}
	}
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
}
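
/**
 * i40e_config_iwarp_qvlist
 * @vf: pointer to the VF info
 * @qvlist_info: queue and vector list
 *
 * Return 0 on success or < 0 on error
 **/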
static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
				    struct virtchnl_iwarp_qvlist_info *qvlist_info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct virtchnl_iwarp_qv_info *qv_info;
	u32 v_idx, i, reg_idx, reg;
	u32 next_q_idx, next_q_type;
	u32 msix_vf, size;

	size = sizeof(struct virtchnl_iwarp_qvlist_info) +
	       (sizeof(struct virtchnl_iwarp_qv_info) *
						(qvlist_info->num_vectors - 1));
	vf->qvlist_info = kzalloc(size, GFP_KERNEL);
	if (!vf->qvlist_info)
		return -ENOMEM;

	vf->qvlist_info->num_vectors = qvlist_info->num_vectors;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;
		v_idx = qv_info->v_idx;

		/* Validate vector id belongs to this vf */
		if (!i40e_vc_isvalid_vector_id(vf, v_idx))
			goto err;

		vf->qvlist_info->qv_info[i] = *qv_info;

		reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
		/* Grab the queue currently at the head of this vector's
		 * linked list so the new CEQ entry can be chained in
		 * front of it.
		 */
		reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
		next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
				I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
		next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
				I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
			(v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
			(qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
			(next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
			(next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
			wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (qv_info->ceq_idx &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (I40E_QUEUE_TYPE_PE_CEQ <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}

		if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
			reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
			(v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
			(qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));

			wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
		}
	}

	return 0;
err:
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
	return -EINVAL;
}
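
/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/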
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	struct i40e_vsi *vsi;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		ret = -ENOENT;
		goto error_context;
	}
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!vsi) {
		ret = -ENOENT;
		goto error_context;
	}

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
	tx_ctx.base = info->dma_ring_addr / 128;
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;
	tx_ctx.head_wb_ena = info->headwb_enabled;
	tx_ctx.head_wb_addr = info->dma_headwb_addr;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}
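
/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/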
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_rxq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	u16 pf_queue_id;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128;
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set split mode 10b */
		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.lrxqthresh = 1;
	rx_ctx.crcstrip = 1;
	rx_ctx.prefena = 1;
	rx_ctx.l2tsel = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}
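
/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the VF info
 * @type: type of VSI to allocate
 *
 * alloc VF vsi context & resources
 **/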
static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for VF %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}
	if (type == I40E_VSI_SRIOV) {
		u64 hena = i40e_pf_get_default_rss_hena(pf);
		u8 broadcast[ETH_ALEN];

		vf->lan_vsi_idx = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		/* If the port VLAN has been configured and then the
		 * VF driver was removed then the VSI port VLAN
		 * configuration was destroyed.  Check if there is
		 * a port VLAN and restore the VSI configuration if
		 * needed.
		 */
		if (vf->port_vlan_id)
			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
			f = i40e_add_mac_filter(vsi,
						vf->default_lan_addr.addr);
			if (!f)
				dev_info(&pf->pdev->dev,
					 "Could not add MAC filter %pM for VF %d\n",
					 vf->default_lan_addr.addr, vf->vf_id);
		}
		eth_broadcast_addr(broadcast);
		f = i40e_add_mac_filter(vsi, broadcast);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF broadcast filter\n");
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
	}

	/* program mac filter */
	ret = i40e_sync_vsi_filters(vsi);
	if (ret)
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");

	/* Set VF bandwidth if specified */
	if (vf->tx_rate) {
		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
						  vf->tx_rate / 50, 0, NULL);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
				vf->vf_id, ret);
	}

error_alloc_vsi_res:
	return ret;
}
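
/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the VF info
 *
 * enable VF mappings
 **/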
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_queue_pairs = 0;
	int j;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	/* map PF queues to VF queues */
	for (j = 0; j < pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; j++) {
		u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j);

		reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
		wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
		total_queue_pairs++;
	}

	/* map PF queues to VSI */
	for (j = 0; j < 7; j++) {
		if (j * 2 >= pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs) {
			reg = 0x07FF07FF;	/* unused */
		} else {
			u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
							  j * 2);
			reg = qid;
			qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
						      (j * 2) + 1);
			reg |= qid << 16;
		}
		i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id),
				  reg);
	}

	i40e_flush(hw);
}
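
/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the VF info
 *
 * disable VF mappings
 **/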
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}
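
/**
 * i40e_free_vf_res
 * @vf: pointer to the VF info
 *
 * free VF resources
 **/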
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, msix_vf;

	/* Start by disabling VF's configuration API to prevent the OS from
	 * accessing the VF's VSI after it's freed or invalidated.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* It's possible the VF had requested more queues than the default so
	 * do the accounting here when we're about to free them.
	 */
	if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
		pf->queues_left += vf->num_queue_pairs -
				   I40E_DEFAULT_QUEUES_PER_VF;
	}

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_id = 0;
		vf->num_mac = 0;
	}
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						       (vf->vf_id))
						      + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}

	/* reset some of the state variables keeping track of the resources */
	vf->num_queue_pairs = 0;
	clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
	clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
}
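
/**
 * i40e_alloc_vf_res
 * @vf: pointer to the VF info
 *
 * allocate VF resources
 **/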
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret;

	if (vf->num_req_queues &&
	    vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
		pf->num_vf_qps = vf->num_req_queues;
	else
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;

	/* We account for each VF to get a default number of queue pairs.  If
	 * the VF has now requested more, we need to account for that to make
	 * certain we never request more queues than we actually have left in
	 * available resources.
	 */
	if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
		pf->queues_left -=
			total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;

	if (vf->trusted)
		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * VF req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* VF is now completely initialized */
	set_bit(I40E_VF_STATE_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}

#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_MASK 0x20
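
/**
 * i40e_quiesce_vf_pci
 * @vf: pointer to the VF structure
 *
 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 * if the transactions never clear.
 **/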
static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int vf_abs_id, i;
	u32 reg;

	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	wr32(hw, I40E_PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_PF_PCI_CIAD);
		if ((reg & VF_TRANS_PENDING_MASK) == 0)
			return 0;
		udelay(1);
	}
	return -EIO;
}
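
/**
 * i40e_trigger_vf_reset
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 **/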
static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, bit_idx;

	/* warn the VF */
	clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
	 * It's normally disabled in i40e_free_vf_res(), but it's safer
	 * to do it earlier to give some time to finish to any VF config
	 * functions that may still be running at this point.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!flr) {
		/* reset VF using VPGEN_VFRTRIG reg */
		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
		i40e_flush(hw);
	}
	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
	bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
	wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	i40e_flush(hw);

	if (i40e_quiesce_vf_pci(vf))
		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
			vf->vf_id);
}
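
/**
 * i40e_cleanup_reset_vf
 * @vf: pointer to the VF structure
 *
 * Cleanup a VF after the hardware reset is finished. Expects the caller to
 * have verified whether the reset is finished properly, and ensure the
 * minimum amount of wait time has passed.
 **/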
static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* free VF resources to begin resetting the VSI state */
	i40e_free_vf_res(vf);

	/* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
	 * By doing this we allow HW to access VF memory at any point. If we
	 * did it any sooner, HW could access memory while it was being freed
	 * in i40e_free_vf_res(), causing an IOMMU fault.
	 *
	 * On the other hand, this needs to be done ASAP, because the VF driver
	 * is waiting for this to happen and may report a timeout. It's
	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
	 * it.
	 */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

	/* reallocate VF resources to finish resetting the VSI state */
	if (!i40e_alloc_vf_res(vf)) {
		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

		i40e_enable_vf_mappings(vf);
		set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
		clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		/* Do not notify the client during VF init */
		if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
					&vf->vf_states))
			i40e_notify_client_of_vf_reset(pf, abs_vf_id);
		vf->num_vlan = 0;
	}

	/* Tell the VF driver the reset is done. This needs to be done only
	 * after VF has been fully initialized, because the VF driver may
	 * request resources immediately after setting this flag.
	 */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}
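
/**
 * i40e_reset_vf
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Returns true if the VF is reset, false otherwise.
 **/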
bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool rsd = false;
	u32 reg;
	int i;

	/* If the VFs have been disabled, this means something else is
	 * resetting the VF, so we shouldn't continue.
	 */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return false;

	i40e_trigger_vf_reset(vf, flr);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully. Due to internal HW FIFO flushes,
		 * we must wait 10ms before the register will be valid.
		 */
		usleep_range(10000, 20000);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			vf->vf_id);
	usleep_range(10000, 20000);

	/* On initial reset, we don't have any queues to disable */
	if (vf->lan_vsi_idx != 0)
		i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);

	i40e_cleanup_reset_vf(vf);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}
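
/**
 * i40e_reset_all_vfs
 * @pf: pointer to the PF structure
 * @flr: VFLR was issued or not
 *
 * Reset all allocated VFs in one go. First, tell the hardware to reset each
 * VF, then do all the waiting in one chunk, and finally finish restoring each
 * VF after the wait. This is useful during PF routines which need to reset
 * all VFs, as otherwise it must perform these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 **/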
bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int i, v;
	u32 reg;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!pf->num_alloc_vfs)
		return false;

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return false;

	/* Begin reset on all VFs at once */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_trigger_vf_reset(&pf->vf[v], flr);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
		usleep_range(10000, 20000);

		/* Check each VF in sequence, beginning with the VF to fail
		 * the previous check.
		 */
		while (v < pf->num_alloc_vfs) {
			vf = &pf->vf[v];
			reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
			if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
				break;

			/* If the current VF has finished resetting, move on
			 * to the next VF in sequence.
			 */
			v++;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	/* Display a warning if at least one VF didn't manage to reset in
	 * time, but continue on with the operation.
	 */
	if (v < pf->num_alloc_vfs)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			pf->vf[v].vf_id);
	usleep_range(10000, 20000);

	/* Begin disabling all the rings associated with VFs, but do not wait
	 * between each VF.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* Now that we've notified HW to disable all of the VF rings, wait
	 * until they finish.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* Hw may need up to 50ms to finish disabling the RX queues. We
	 * minimize the wait by delaying only once for all VFs.
	 */
	mdelay(50);

	/* Finish the reset on each VF */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_cleanup_reset_vf(&pf->vf[v]);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}
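
/**
 * i40e_free_vfs
 * @pf: pointer to the PF structure
 *
 * free VF resources
 **/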
void i40e_free_vfs(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, bit_idx;
	int i, tmp, vf_id;

	if (!pf->vf)
		return;
	while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		usleep_range(1000, 2000);

	i40e_notify_client_of_vf_enable(pf, 0);

	/* Amortize wait time by stopping all VFs at the same time */
	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");

	/* free up VF resources */
	tmp = pf->num_alloc_vfs;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			i40e_free_vf_res(&pf->vf[i]);
		/* disable qp mappings */
		i40e_disable_vf_mappings(&pf->vf[i]);
	}

	kfree(pf->vf);
	pf->vf = NULL;

	/* This check is for when the driver is unloaded while VFs are
	 * assigned. Setting the number of VFs to 0 through sysfs is caught
	 * before this function ever gets called.
	 */
	if (!pci_vfs_assigned(pf->pdev)) {
		/* Acknowledge VFLR for all VFS. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}
	}
	clear_bit(__I40E_VF_DISABLE, pf->state);
}

#ifdef CONFIG_PCI_IOV
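
/**
 * i40e_alloc_vfs
 * @pf: pointer to the PF structure
 * @num_alloc_vfs: number of VFs to allocate
 *
 * allocate VF resources
 **/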
int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
	struct i40e_vf *vfs;
	int i, ret = 0;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	i40e_irq_dynamic_disable_icr0(pf);

	/* Check to see if we're just allocating resources for extant VFs */
	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
		if (ret) {
			pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			pf->num_alloc_vfs = 0;
			goto err_iov;
		}
	}
	/* allocate memory */
	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	pf->vf = vfs;

	/* apply default profile */
	for (i = 0; i < num_alloc_vfs; i++) {
		vfs[i].pf = pf;
		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
		vfs[i].spoofchk = true;

		set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
	}
	pf->num_alloc_vfs = num_alloc_vfs;

	/* VF resources get allocated during reset */
	i40e_reset_all_vfs(pf, false);

	i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);

err_alloc:
	if (ret)
		i40e_free_vfs(pf);
err_iov:
	/* Re-enable interrupt 0. */
	i40e_irq_dynamic_enable_icr0(pf);
	return ret;
}

#endif
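
/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs
 **/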
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	if (test_bit(__I40E_TESTING, pf->state)) {
		dev_warn(&pdev->dev,
			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
		err = -EPERM;
		goto err_out;
	}

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	if (num_vfs > pf->num_req_vfs) {
		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
			 num_vfs, pf->num_req_vfs);
		err = -EPERM;
		goto err_out;
	}

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	return 0;
}
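
/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/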
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	if (num_vfs) {
		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
			i40e_do_reset_safe(pf,
					   BIT_ULL(__I40E_PF_RESET_REQUESTED));
		}
		return i40e_pci_sriov_enable(pdev, num_vfs);
	}

	if (!pci_vfs_assigned(pf->pdev)) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
		i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
	} else {
		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
		return -EINVAL;
	}
	return 0;
}
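
/***********************virtual channel routines******************/

/**
 * i40e_vc_send_msg_to_vf
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to VF
 **/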
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
				  u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	int abs_vf_id;
	i40e_status aq_ret;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return -EINVAL;

	pf = vf->pf;
	hw = &pf->hw;
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* single place to detect unsuccessful return values */
	if (v_retval) {
		vf->num_invalid_msgs++;
		dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
			 vf->vf_id, v_opcode, v_retval);
		if (vf->num_invalid_msgs >
		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
			dev_err(&pf->pdev->dev,
				"Number of invalid messages exceeded for VF %d\n",
				vf->vf_id);
			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		}
	} else {
		vf->num_valid_msgs++;
		/* reset the invalid counter, if a valid message is received. */
		vf->num_invalid_msgs = 0;
	}

	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
					msg, msglen, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "Unable to send the message to VF %d aq_err %d\n",
			 vf->vf_id, pf->hw.aq.asq_last_status);
		return -EIO;
	}

	return 0;
}
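
/**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the VF info
 * @opcode: operation code
 * @retval: return value
 *
 * send resp msg to VF
 **/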
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
				   enum virtchnl_ops opcode,
				   i40e_status retval)
{
	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}
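
/**
 * i40e_vc_get_version_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request the API version used by the PF
 **/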
static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_version_info info = {
		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
	};

	vf->vf_ver = *(struct virtchnl_version_info *)msg;
	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
	if (VF_IS_V10(&vf->vf_ver))
		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
				      I40E_SUCCESS, (u8 *)&info,
				      sizeof(struct virtchnl_version_info));
}
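
/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request its resources
 **/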
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_vf_resource *vfres = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int num_vsis = 1;
	int len = 0;
	int ret;

	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	len = (sizeof(struct virtchnl_vf_resource) +
	       sizeof(struct virtchnl_vsi_resource) * num_vsis);

	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		aq_ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}
	if (VF_IS_V11(&vf->vf_ver))
		vf->driver_caps = *(u32 *)msg;
	else
		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
				  VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi->info.pvid)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;

	if (i40e_vf_client_capable(pf, vf->vf_id) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
		set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
	} else {
		clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
	} else {
		if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
		    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
		else
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
	}

	if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			vfres->vf_cap_flags |=
				VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;

	if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
		if (pf->flags & I40E_FLAG_MFP_ENABLED) {
			dev_err(&pf->pdev->dev,
				"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
				vf->vf_id);
			aq_ret = I40E_ERR_PARAM;
			goto err;
		}
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
	}

	if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
			vfres->vf_cap_flags |=
					VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;

	vfres->num_vsis = num_vsis;
	vfres->num_queue_pairs = vf->num_queue_pairs;
	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
	vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;

	if (vf->lan_vsi_idx) {
		vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
		vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
		vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
		/* VFs only use TC 0 */
		vfres->vsi_res[0].qset_handle
					  = le16_to_cpu(vsi->info.qs_handle[0]);
		ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
				vf->default_lan_addr.addr);
	}
	set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

err:
	/* send the response back to the VF */
	ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
				     aq_ret, (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}
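
/**
 * i40e_vc_reset_vf_msg
 * @vf: pointer to the VF info
 *
 * called from the VF to reset itself,
 * unlike other virtchnl messages, PF driver
 * doesn't send the response back to the VF
 **/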
static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
{
	if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		i40e_reset_vf(vf, false);
}
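
/**
 * i40e_getnum_vf_vsi_vlan_filters
 * @vsi: pointer to the vsi
 *
 * called to get the number of VLANs offloaded on this VF
 **/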
static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	int num_vlans = 0, bkt;

	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
			num_vlans++;
	}

	return num_vlans;
}
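
/**
 * i40e_vc_config_promiscuous_mode_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to configure the promiscuous mode of
 * VF vsis
 **/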
static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
					       u8 *msg, u16 msglen)
{
	struct virtchnl_promisc_info *info =
	    (struct virtchnl_promisc_info *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_mac_filter *f;
	i40e_status aq_ret = 0;
	bool allmulti = false;
	struct i40e_vsi *vsi;
	bool alluni = false;
	int aq_err = 0;
	int bkt;

	vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
	    !vsi) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
		dev_err(&pf->pdev->dev,
			"Unprivileged VF %d is attempting to configure promiscuous mode\n",
			vf->vf_id);
		/* Lie to the VF on purpose. */
		aq_ret = 0;
		goto error_param;
	}
	/* Multicast promiscuous handling */
	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
		allmulti = true;

	if (vf->port_vlan_id) {
		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
							    allmulti,
							    vf->port_vlan_id,
							    NULL);
	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
		hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
			if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
				continue;
			aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
								    vsi->seid,
								    allmulti,
								    f->vlan,
								    NULL);
			aq_err = pf->hw.aq.asq_last_status;
			if (aq_ret) {
				dev_err(&pf->pdev->dev,
					"Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
					f->vlan,
					i40e_stat_str(&pf->hw, aq_ret),
					i40e_aq_str(&pf->hw, aq_err));
				break;
			}
		}
	} else {
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
							       allmulti, NULL);
		aq_err = pf->hw.aq.asq_last_status;
		if (aq_ret) {
			dev_err(&pf->pdev->dev,
				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
			goto error_param;
		}
	}

	if (!aq_ret) {
		dev_info(&pf->pdev->dev,
			 "VF %d successfully set multicast promiscuous mode\n",
			 vf->vf_id);
		if (allmulti)
			set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
		else
			clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
	}

	if (info->flags & FLAG_VF_UNICAST_PROMISC)
		alluni = true;
	if (vf->port_vlan_id) {
		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
							    alluni,
							    vf->port_vlan_id,
							    NULL);
	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
		hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
			if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
				continue;
			aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
								    vsi->seid,
								    alluni,
								    f->vlan,
								    NULL);
			aq_err = pf->hw.aq.asq_last_status;
			if (aq_ret)
				dev_err(&pf->pdev->dev,
					"Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
					f->vlan,
					i40e_stat_str(&pf->hw, aq_ret),
					i40e_aq_str(&pf->hw, aq_err));
		}
	} else {
		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
							     alluni, NULL,
							     true);
		aq_err = pf->hw.aq.asq_last_status;
		if (aq_ret) {
			dev_err(&pf->pdev->dev,
				"VF %d failed to set unicast promiscuous mode %8.8x err %s aq_err %s\n",
				vf->vf_id, info->flags,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
			goto error_param;
		}
	}

	if (!aq_ret) {
		dev_info(&pf->pdev->dev,
			 "VF %d successfully set unicast promiscuous mode\n",
			 vf->vf_id);
		if (alluni)
			set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
		else
			clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf,
				       VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
				       aq_ret);
}
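
/**
 * i40e_vc_config_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to configure the rx/tx
 * queues
 **/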
static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_vsi_queue_config_info *qci =
	    (struct virtchnl_vsi_queue_config_info *)msg;
	struct virtchnl_queue_pair_info *qpi;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id, vsi_queue_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	vsi_id = qci->vsi_id;
	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	for (i = 0; i < qci->num_queue_pairs; i++) {
		qpi = &qci->qpair[i];
		vsi_queue_id = qpi->txq.queue_id;
		if ((qpi->txq.vsi_id != vsi_id) ||
		    (qpi->rxq.vsi_id != vsi_id) ||
		    (qpi->rxq.queue_id != vsi_queue_id) ||
		    !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->rxq) ||
		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->txq)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}
	/* set vsi num_queue_pairs in use to num configured by VF */
	pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = qci->num_queue_pairs;

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
				       aq_ret);
}
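
/**
 * i40e_vc_config_irq_map_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to configure the irq to
 * queue map
 **/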
static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_irq_map_info *irqmap_info =
	    (struct virtchnl_irq_map_info *)msg;
	struct virtchnl_vector_map *map;
	u16 vsi_id, vsi_queue_id, vector_id;
	i40e_status aq_ret = 0;
	unsigned long tempmap;
	int i;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < irqmap_info->num_vectors; i++) {
		map = &irqmap_info->vecmap[i];

		vector_id = map->vector_id;
		vsi_id = map->vsi_id;
		/* validate msg params */
		if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
		    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		/* lookout for the invalid queue index */
		tempmap = map->rxq_map;
		for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      vsi_queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
		}

		tempmap = map->txq_map;
		for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      vsi_queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
		}

		i40e_config_irq_link_list(vf, vsi_id, map);
	}
error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
				       aq_ret);
}
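
/**
 * i40e_vc_enable_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to enable all or specific queue(s)
 **/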
static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id = vqs->vsi_id;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (i40e_vsi_start_rings(pf->vsi[vf->lan_vsi_idx]))
		aq_ret = I40E_ERR_TIMEOUT;
error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
				       aq_ret);
}
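
/**
 * i40e_vc_disable_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to disable all or specific
 * queue(s)
 **/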
static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
				       aq_ret);
}
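
/**
 * i40e_vc_request_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * VFs get a default number of queues but can use this message to request a
 * different number. If the request is successful, PF will reset the VF and
 * return 0. If unsuccessful, PF will send message informing VF of number of
 * available queues and return result of sending VF a message.
 **/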
static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen)
{
	struct virtchnl_vf_res_request *vfres =
		(struct virtchnl_vf_res_request *)msg;
	int req_pairs = vfres->num_queue_pairs;
	int cur_pairs = vf->num_queue_pairs;
	struct i40e_pf *pf = vf->pf;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		return -EINVAL;

	if (req_pairs <= 0) {
		dev_err(&pf->pdev->dev,
			"VF %d tried to request %d queues. Ignoring.\n",
			vf->vf_id, req_pairs);
	} else if (req_pairs > I40E_MAX_VF_QUEUES) {
		dev_err(&pf->pdev->dev,
			"VF %d tried to request more than %d queues.\n",
			vf->vf_id,
			I40E_MAX_VF_QUEUES);
		vfres->num_queue_pairs = I40E_MAX_VF_QUEUES;
	} else if (req_pairs - cur_pairs > pf->queues_left) {
		dev_warn(&pf->pdev->dev,
			 "VF %d requested %d more queues, but only %d left.\n",
			 vf->vf_id,
			 req_pairs - cur_pairs,
			 pf->queues_left);
		vfres->num_queue_pairs = pf->queues_left + cur_pairs;
	} else {
		/* successful request */
		vf->num_req_queues = req_pairs;
		i40e_vc_notify_vf_reset(vf);
		i40e_reset_vf(vf, false);
		return 0;
	}

	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
				      (u8 *)vfres, sizeof(*vfres));
}
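
/**
 * i40e_vc_get_stats_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to get vsi stats
 **/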
static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_eth_stats stats;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;

	memset(&stats, 0, sizeof(struct i40e_eth_stats));

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	i40e_update_eth_stats(vsi);
	stats = vsi->eth_stats;

error_param:
	/* send the response back to the VF */
	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
				      (u8 *)&stats, sizeof(stats));
}

/* If the VF is not trusted restrict the number of MAC/VLAN it can program */
#define I40E_VC_MAX_MAC_ADDR_PER_VF 12
#define I40E_VC_MAX_VLAN_PER_VF 8
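
/**
 * i40e_check_vf_permission
 * @vf: pointer to the VF info
 * @macaddr: pointer to the MAC Address being checked
 *
 * Check if the VF has permission to add or delete unicast MAC address
 * filters and return error code -EPERM if not.  Then check if the
 * address filter requested is broadcast or zero and if so return
 * an invalid MAC address error code.
 **/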
static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
{
	struct i40e_pf *pf = vf->pf;
	int ret = 0;

	if (is_broadcast_ether_addr(macaddr) ||
	    is_zero_ether_addr(macaddr)) {
		dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr);
		ret = I40E_ERR_INVALID_MAC_ADDR;
	} else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) &&
		   !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
		   !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) {
		/* If the host VMM administrator has set the VF MAC address
		 * administratively via the ndo_set_vf_mac command, then deny
		 * permission to the VF to add or delete unicast MAC addresses,
		 * unless the VF is privileged and then it can do whatever.
		 * The VF may request to set the MAC address, if the request
		 * matches the administratively set MAC address.
		 */
		dev_err(&pf->pdev->dev,
			"VF attempting to override administratively set MAC address, reload the VF driver to resume normal operation\n");
		ret = -EPERM;
	} else if ((vf->num_mac >= I40E_VC_MAX_MAC_ADDR_PER_VF) &&
		   !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
		dev_err(&pf->pdev->dev,
			"VF is not trusted, switch the VF to trusted to add more functionality\n");
		ret = -EPERM;
	}
	return ret;
}
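
/**
 * i40e_vc_add_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * add guest mac address filter
 **/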
static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_ether_addr_list *al =
	    (struct virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = al->vsi_id;
	i40e_status ret = 0;
	int i;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < al->num_elements; i++) {
		ret = i40e_check_vf_permission(vf, al->list[i].addr);
		if (ret)
			goto error_param;
	}
	vsi = pf->vsi[vf->lan_vsi_idx];

	/* Lock once, because all function inside for loop accesses VSI's
	 * MAC filter list which needs to be protected using same lock.
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* add new addresses to the list */
	for (i = 0; i < al->num_elements; i++) {
		struct i40e_mac_filter *f;

		f = i40e_find_mac(vsi, al->list[i].addr);
		if (!f) {
			f = i40e_add_mac_filter(vsi, al->list[i].addr);

			if (!f) {
				dev_err(&pf->pdev->dev,
					"Unable to add MAC filter %pM for VF %d\n",
					al->list[i].addr, vf->vf_id);
				ret = I40E_ERR_PARAM;
				spin_unlock_bh(&vsi->mac_filter_hash_lock);
				goto error_param;
			} else {
				vf->num_mac++;
			}
		}
	}
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* program the updated filter list */
	ret = i40e_sync_vsi_filters(vsi);
	if (ret)
		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
			vf->vf_id, ret);

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
				       ret);
}
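
/**
 * i40e_vc_del_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * remove guest mac address filter
 **/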
static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_ether_addr_list *al =
	    (struct virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = al->vsi_id;
	i40e_status ret = 0;
	int i;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < al->num_elements; i++) {
		if (is_broadcast_ether_addr(al->list[i].addr) ||
		    is_zero_ether_addr(al->list[i].addr)) {
			dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
				al->list[i].addr, vf->vf_id);
			ret = I40E_ERR_INVALID_MAC_ADDR;
			goto error_param;
		}
	}
	vsi = pf->vsi[vf->lan_vsi_idx];

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	/* delete addresses from the list */
	for (i = 0; i < al->num_elements; i++)
		if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
			ret = I40E_ERR_INVALID_MAC_ADDR;
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_param;
		} else {
			vf->num_mac--;
		}

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* program the updated filter list */
	ret = i40e_sync_vsi_filters(vsi);
	if (ret)
		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
			vf->vf_id, ret);

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
				       ret);
}
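
/**
 * i40e_vc_add_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * program guest vlan id
 **/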
static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_vlan_filter_list *vfl =
	    (struct virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vfl->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
		dev_err(&pf->pdev->dev,
			"VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
		goto error_param;
	}
	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = I40E_ERR_PARAM;
			dev_err(&pf->pdev->dev,
				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
			goto error_param;
		}
	}
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (vsi->info.pvid) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	i40e_vlan_stripping_enable(vsi);
	for (i = 0; i < vfl->num_elements; i++) {
		/* add new VLAN filter */
		int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);

		if (!ret)
			vf->num_vlan++;

		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
							   true,
							   vfl->vlan_id[i],
							   NULL);
		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
							   true,
							   vfl->vlan_id[i],
							   NULL);

		if (ret)
			dev_err(&pf->pdev->dev,
				"Unable to add VLAN filter %d for VF %d, error %d\n",
				vfl->vlan_id[i], vf->vf_id, ret);
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
}
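
/**
 * i40e_vc_remove_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * remove programmed guest vlan id
 **/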
static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_vlan_filter_list *vfl =
	    (struct virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vfl->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (vsi->info.pvid) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
		vf->num_vlan--;

		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
							   false,
							   vfl->vlan_id[i],
							   NULL);
		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
							   false,
							   vfl->vlan_id[i],
							   NULL);
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
}
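
/**
 * i40e_vc_iwarp_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF for the iwarp msgs
 **/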
static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf = vf->pf;
	int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
				     msg, msglen);

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP,
				       aq_ret);
}
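
/**
 * i40e_vc_iwarp_qvmap_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 * @config: config qvmap or release it
 *
 * called from the VF for the iwarp msgs
 **/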
static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen,
				   bool config)
{
	struct virtchnl_iwarp_qvlist_info *qvlist_info =
				(struct virtchnl_iwarp_qvlist_info *)msg;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (config) {
		if (i40e_config_iwarp_qvlist(vf, qvlist_info))
			aq_ret = I40E_ERR_PARAM;
	} else {
		i40e_release_iwarp_qvlist(vf);
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf,
			       config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
			       VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
			       aq_ret);
}

/**
 * i40e_vc_config_rss_key
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * Configure the VF's RSS key
 **/
static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_rss_key *vrk =
		(struct virtchnl_rss_key *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vrk->vsi_id;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
	    (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

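	/* program only the hash key; a NULL LUT leaves the lookup table alone */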
	vsi = pf->vsi[vf->lan_vsi_idx];
	aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
err:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
				       aq_ret);
}

/**
 * i40e_vc_config_rss_lut
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * Configure the VF's RSS LUT
 **/
static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_rss_lut *vrl =
		(struct virtchnl_rss_lut *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vrl->vsi_id;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
	    (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
	/* send the response to the VF */
err:
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
				       aq_ret);
}

/**
 * i40e_vc_get_rss_hena
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * Return the RSS HENA bits allowed by the hardware
 **/
static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_rss_hena *vrh = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	int len = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}
	len = sizeof(struct virtchnl_rss_hena);

	vrh = kzalloc(len, GFP_KERNEL);
	if (!vrh) {
		aq_ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}
	vrh->hena = i40e_pf_get_default_rss_hena(pf);
err:
	/* send the response back to the VF */
	aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
					aq_ret, (u8 *)vrh, len);
	kfree(vrh);
	return aq_ret;
}

/**
 * i40e_vc_set_rss_hena
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * Set the RSS HENA bits for the VF
 **/
static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_rss_hena *vrh =
		(struct virtchnl_rss_hena *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}
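	/* the hash enable bits are 64 wide, split across two 32-bit registers */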
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
			  (u32)(vrh->hena >> 32));

	/* send the response to the VF */
err:
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
}

/**
 * i40e_vc_enable_vlan_stripping
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * Enable vlan header stripping for the VF
 **/
static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg,
					 u16 msglen)
{
	struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	i40e_vlan_stripping_enable(vsi);

	/* send the response to the VF */
err:
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
				       aq_ret);
}

/**
 * i40e_vc_disable_vlan_stripping
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * Disable vlan header stripping for the VF
 **/
static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg,
					  u16 msglen)
{
	struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	i40e_vlan_stripping_disable(vsi);

	/* send the response to the VF */
err:
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
				       aq_ret);
}

/**
 * i40e_vc_process_vf_msg
 * @pf: pointer to the PF structure
 * @vf_id: source VF id
 * @v_opcode: operation code
 * @v_retval: unused return value code
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the common aeq/arq handler to
 * process request from VF
 **/
int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
			   u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
	struct i40e_vf *vf;
	int ret;

	pf->vf_aq_requests++;
	/* local_vf_id can go negative if vf_id is below the base, so guard
	 * both ends before indexing pf->vf
	 */
	if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs)
		return -EINVAL;
	vf = &(pf->vf[local_vf_id]);

	/* Check if VF is disabled. */
	if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
		return I40E_ERR_PARAM;

	/* perform basic checks on the msg */
	ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);

	/* perform additional checks specific to this driver */
	if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) {
		struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg;

		if (vrk->key_len != I40E_HKEY_ARRAY_SIZE)
			ret = -EINVAL;
	} else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) {
		struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;

		if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)
			ret = -EINVAL;
	}

	if (ret) {
		i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
		dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
			local_vf_id, v_opcode, msglen);
		switch (ret) {
		case VIRTCHNL_ERR_PARAM:
			return -EPERM;
		default:
			return -EINVAL;
		}
	}

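	/* dispatch the request to the appropriate handler */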
	switch (v_opcode) {
	case VIRTCHNL_OP_VERSION:
		ret = i40e_vc_get_version_msg(vf, msg);
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		ret = i40e_vc_get_vf_resources_msg(vf, msg);
		break;
	case VIRTCHNL_OP_RESET_VF:
		i40e_vc_reset_vf_msg(vf);
		ret = 0;
		break;
	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		ret = i40e_vc_config_queues_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
		ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
		i40e_vc_notify_vf_link_state(vf);
		break;
	case VIRTCHNL_OP_DISABLE_QUEUES:
		ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_ADD_ETH_ADDR:
		ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_DEL_ETH_ADDR:
		ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_ADD_VLAN:
		ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_DEL_VLAN:
		ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_GET_STATS:
		ret = i40e_vc_get_stats_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_IWARP:
		ret = i40e_vc_iwarp_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
		ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, true);
		break;
	case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
		ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_KEY:
		ret = i40e_vc_config_rss_key(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_LUT:
		ret = i40e_vc_config_rss_lut(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
		ret = i40e_vc_get_rss_hena(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_SET_RSS_HENA:
		ret = i40e_vc_set_rss_hena(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
		ret = i40e_vc_enable_vlan_stripping(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
		ret = i40e_vc_disable_vlan_stripping(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_REQUEST_QUEUES:
		ret = i40e_vc_request_queues_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_UNKNOWN:
	default:
		dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
			v_opcode, local_vf_id);
		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
					      I40E_ERR_NOT_IMPLEMENTED);
		break;
	}

	return ret;
}

/**
 * i40e_vc_process_vflr_event
 * @pf: pointer to the PF structure
 *
 * called from the vflr irq handler to
 * free up VF resources and state variables
 **/
int i40e_vc_process_vflr_event(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, bit_idx;
	struct i40e_vf *vf;
	int vf_id;

	if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
		return 0;

	/* Re-enable the VFLR interrupt cause here, before looking for which
	 * VF got reset. Otherwise, if another VF gets a reset while the
	 * first one is being processed, that interrupt will be lost, and
	 * that VF will be stuck in reset forever.
	 */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);

	clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
		vf = &pf->vf[vf_id];
		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
		if (reg & BIT(bit_idx))
			/* VFLR event occurred, so reset the VF */
			i40e_reset_vf(vf, true);
	}

	return 0;
}

/**
 * i40e_ndo_set_vf_mac
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @mac: mac address
 *
 * program VF mac address
 **/
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_mac_filter *f;
	struct i40e_vf *vf;
	int ret = 0;
	struct hlist_node *h;
	int bkt;
	u8 i;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev,
			"Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	vf = &(pf->vf[vf_id]);
	vsi = pf->vsi[vf->lan_vsi_idx];

	/* When the VF is resetting wait until it is done.
	 * It can take up to 200 milliseconds, so poll for up to
	 * 300 milliseconds (15 iterations of 20 ms) to be safe.
	 */
	for (i = 0; i < 15; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
			break;
		msleep(20);
	}
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error_param;
	}

	if (is_multicast_ether_addr(mac)) {
		dev_err(&pf->pdev->dev,
			"Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	/* Lock once because the functions invoked below require
	 * mac_filter_hash_lock to be held
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* delete the temporary mac address */
	if (!is_zero_ether_addr(vf->default_lan_addr.addr))
		i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);

	/* Delete all the filters for this VSI - we're going to kill it
	 * anyway.
	 */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
		__i40e_del_filter(vsi, f);

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* program mac filter */
	if (i40e_sync_vsi_filters(vsi)) {
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
		ret = -EIO;
		goto error_param;
	}
	ether_addr_copy(vf->default_lan_addr.addr, mac);

	if (is_zero_ether_addr(mac)) {
		vf->pf_set_mac = false;
		dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
	} else {
		vf->pf_set_mac = true;
		dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
			 mac, vf_id);
	}

	/* Force the VF driver stop so it has to reload with new MAC address */
	i40e_vc_disable_vf(vf);
	dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");

error_param:
	return ret;
}

/**
 * i40e_vsi_has_vlans - True if VSI has configured VLANs
 * @vsi: pointer to the vsi
 *
 * Check if a VSI has configured any VLANs. False if we have a port VLAN
 * or if we have no VLAN filters configured at all.
 **/
static bool i40e_vsi_has_vlans(struct i40e_vsi *vsi)
{
	bool have_vlans;

	/* If we have a port VLAN, then the VSI cannot have any VLANs
	 * configured, as all traffic is tagged with the port VLAN.
	 */
	if (vsi->info.pvid)
		return false;

	/* Since we don't have a PVID, we know that if the device is in VLAN
	 * mode it must be because of a VLAN filter configured on this VSI.
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	have_vlans = i40e_is_vsi_in_vlan(vsi);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	return have_vlans;
}

/**
 * i40e_ndo_set_vf_port_vlan
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @vlan_id: VLAN ID
 * @qos: priority setting
 * @vlan_proto: vlan protocol
 *
 * program VF vlan id and/or qos
 **/
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
			      u16 vlan_id, u8 qos, __be16 vlan_proto)
{
	u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_pvid;
	}

	if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
		ret = -EINVAL;
		goto error_pvid;
	}

	if (vlan_proto != htons(ETH_P_8021Q)) {
		dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
		ret = -EPROTONOSUPPORT;
		goto error_pvid;
	}

	vf = &(pf->vf[vf_id]);
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error_pvid;
	}

	if (le16_to_cpu(vsi->info.pvid) == vlanprio)
		/* duplicate request, so just return success */
		goto error_pvid;

	if (i40e_vsi_has_vlans(vsi)) {
		dev_err(&pf->pdev->dev,
			"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
			vf_id);
		/* Administrator Error - knock the VF offline until it does
		 * the right thing by invoking operations like VF reset
		 */
		i40e_vc_disable_vf(vf);
		/* During reset the VF got a new VSI, so refresh the pointer. */
		vsi = pf->vsi[vf->lan_vsi_idx];
	}

	/* Locked once because multiple functions below iterate list */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* Check for condition where there was already a port VLAN ID
	 * filter set and now it is being deleted by setting it to zero.
	 * Additionally check for the condition where there was a port
	 * VLAN but now there is a new and different port VLAN being set.
	 * Before deleting all the old VLAN filters we must add new ones
	 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
	 * MAC addresses deleted.
	 */
	if ((!(vlan_id || qos) ||
	     vlanprio != le16_to_cpu(vsi->info.pvid)) &&
	    vsi->info.pvid) {
		ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_pvid;
		}
	}

	if (vsi->info.pvid) {
		/* remove all filters on the old VLAN */
		i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
					   VLAN_VID_MASK));
	}

	spin_unlock_bh(&vsi->mac_filter_hash_lock);
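	/* updating the PVID issues an admin queue command, so do it unlocked */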
	if (vlan_id || qos)
		ret = i40e_vsi_add_pvid(vsi, vlanprio);
	else
		i40e_vsi_remove_pvid(vsi);
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	if (vlan_id) {
		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
			 vlan_id, qos, vf_id);

		/* add new VLAN filter for each MAC */
		ret = i40e_add_vlan_all_mac(vsi, vlan_id);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_pvid;
		}

		/* remove the previously added non-VLAN MAC filters */
		i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
	}

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* Schedule the worker thread to take care of applying changes */
	i40e_service_event_schedule(vsi->back);

	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
		goto error_pvid;
	}

	/* The Port VLAN needs to be saved across resets the same as the
	 * default LAN MAC address.
	 */
	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
	ret = 0;

error_pvid:
	return ret;
}

#define I40E_BW_CREDIT_DIVISOR 50 /* 50Mbps per BW credit */
#define I40E_MAX_BW_INACTIVE_ACCUM 4 /* device can accumulate 4 credits max */

/**
 * i40e_ndo_set_vf_bw
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @min_tx_rate: Minimum Tx rate
 * @max_tx_rate: Maximum Tx rate
 *
 * configure VF Tx rate
 **/
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
		       int max_tx_rate)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int speed = 0;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id);
		ret = -EINVAL;
		goto error;
	}

	if (min_tx_rate) {
		dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
			min_tx_rate, vf_id);
		return -EINVAL;
	}

	vf = &(pf->vf[vf_id]);
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error;
	}

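	/* convert the link speed to Mbps to compare against max_tx_rate */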
	switch (pf->hw.phy.link_info.link_speed) {
	case I40E_LINK_SPEED_40GB:
		speed = 40000;
		break;
	case I40E_LINK_SPEED_25GB:
		speed = 25000;
		break;
	case I40E_LINK_SPEED_20GB:
		speed = 20000;
		break;
	case I40E_LINK_SPEED_10GB:
		speed = 10000;
		break;
	case I40E_LINK_SPEED_1GB:
		speed = 1000;
		break;
	default:
		break;
	}

	if (max_tx_rate > speed) {
		dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for VF %d.\n",
			max_tx_rate, vf->vf_id);
		ret = -EINVAL;
		goto error;
	}

	if ((max_tx_rate < 50) && (max_tx_rate > 0)) {
		dev_warn(&pf->pdev->dev, "Setting max Tx rate to minimum usable value of 50Mbps.\n");
		max_tx_rate = 50;
	}

	/* Tx rate credits are in values of 50Mbps, 0 is disabled */
	ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
					  max_tx_rate / I40E_BW_CREDIT_DIVISOR,
					  I40E_MAX_BW_INACTIVE_ACCUM, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to set max tx rate, error code %d.\n",
			ret);
		ret = -EIO;
		goto error;
	}
	vf->tx_rate = max_tx_rate;
error:
	return ret;
}

/**
 * i40e_ndo_get_vf_config
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ivi: VF configuration structure
 *
 * return VF configuration
 **/
int i40e_ndo_get_vf_config(struct net_device *netdev,
			   int vf_id, struct ifla_vf_info *ivi)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	vf = &(pf->vf[vf_id]);
	/* first vsi is always the LAN vsi */
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error_param;
	}

	ivi->vf = vf_id;

	ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);

	ivi->max_tx_rate = vf->tx_rate;
	ivi->min_tx_rate = 0;
	ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
	ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
		   I40E_VLAN_PRIORITY_SHIFT;
	if (!vf->link_forced)
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->link_up)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
	ivi->spoofchk = vf->spoofchk;
	ivi->trusted = vf->trusted;
	ret = 0;

error_param:
	return ret;
}

/**
 * i40e_ndo_set_vf_link_state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link: required link state
 *
 * Set the link state of a specified VF, regardless of physical link state
 **/
int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct virtchnl_pf_event pfe;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int abs_vf_id;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_out;
	}

	vf = &pf->vf[vf_id];
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->link_forced = false;
		pfe.event_data.link_event.link_status =
			pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
		pfe.event_data.link_event.link_speed =
			(enum virtchnl_link_speed)
			pf->hw.phy.link_info.link_speed;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->link_forced = true;
		vf->link_up = true;
		pfe.event_data.link_event.link_status = true;
		pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->link_forced = true;
		vf->link_up = false;
		pfe.event_data.link_event.link_status = false;
		pfe.event_data.link_event.link_speed = 0;
		break;
	default:
		ret = -EINVAL;
		goto error_out;
	}

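	/* Notify the VF of its new link state */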
	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);

error_out:
	return ret;
}

/**
 * i40e_ndo_set_vf_spoofchk
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @enable: flag to enable or disable feature
 *
 * Enable or disable VF spoof checking
 **/
int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vsi_context ctxt;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto out;
	}

	vf = &(pf->vf[vf_id]);
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto out;
	}

	if (enable == vf->spoofchk)
		goto out;

	vf->spoofchk = enable;
	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
	if (enable)
		ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
					I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
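	/* when disabling, sec_flags stays zero so the update clears both checks */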
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
			ret);
		ret = -EIO;
	}
out:
	return ret;
}

/**
 * i40e_ndo_set_vf_trust
 * @netdev: network interface device structure of the pf
 * @vf_id: VF identifier
 * @setting: trust setting
 *
 * Enable or disable VF trust setting
 **/
int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		return -EINVAL;
	}

	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
		dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
		return -EINVAL;
	}

	vf = &pf->vf[vf_id];

	if (setting == vf->trusted)
		goto out;

	vf->trusted = setting;
	/* request a VF reset so the new trust setting takes effect */
	i40e_vc_disable_vf(vf);
	dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
		 vf_id, setting ? "" : "un");
out:
	return ret;
}
