1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27#include "vf.h"
28#include "ixgbevf.h"
29
30
31
32
33
/* PCI config space offset where Hyper-V exposes the VF's permanent MAC
 * address one byte at a time (read in ixgbevf_hv_reset_hw_vf).
 */
#define IXGBE_HV_RESET_OFFSET 0x201
35
36static inline s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg,
37 u32 *retmsg, u16 size)
38{
39 struct ixgbe_mbx_info *mbx = &hw->mbx;
40 s32 retval = mbx->ops.write_posted(hw, msg, size);
41
42 if (retval)
43 return retval;
44
45 return mbx->ops.read_posted(hw, retmsg, size);
46}
47
48
49
50
51
52
53
54
55
56
/**
 * ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * The VF has no registers to program at this stage; simply clear the
 * adapter_stopped flag so the Tx/Rx paths may run.
 */
static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
{
	/* Clear adapter stopped flag */
	hw->adapter_stopped = false;

	return 0;
}
64
65
66
67
68
69
70
71
72static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
73{
74 s32 status = hw->mac.ops.start_hw(hw);
75
76 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
77
78 return status;
79}
80
81
82
83
84
85
86
87
/**
 * ixgbevf_reset_hw_vf - Performs hardware reset
 * @hw: pointer to hardware structure
 *
 * Stops the adapter, resets the VF via VFCTRL, then uses the mailbox to
 * ask the PF for the permanent MAC address and multicast filter type.
 */
static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	u32 timeout = IXGBE_VF_INIT_TIMEOUT;
	s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
	u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* Call adapter stop to disable tx/rx and clear interrupts */
	hw->mac.ops.stop_adapter(hw);

	/* reset the api to 1.0 which is the base version */
	hw->api_version = ixgbe_mbox_api_10;

	IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
	IXGBE_WRITE_FLUSH(hw);

	/* we cannot reset while the RSTI / RSTD bits are asserted */
	while (!mbx->ops.check_for_rst(hw) && timeout) {
		timeout--;
		udelay(5);
	}

	if (!timeout)
		return IXGBE_ERR_RESET_FAILED;

	/* mailbox timeout can now become active */
	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;

	msgbuf[0] = IXGBE_VF_RESET;
	mbx->ops.write_posted(hw, msgbuf, 1);

	mdelay(10);

	/* set our "perm_addr" based on info provided by PF
	 * also set up the mc_filter_type which is piggy backed
	 * on the mac address in word 3
	 */
	ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
	if (ret_val)
		return ret_val;

	/* New versions of the PF may NACK the reset return message
	 * to indicate that no MAC address has yet been assigned for
	 * the VF.
	 */
	if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
	    msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
		return IXGBE_ERR_INVALID_MAC_ADDR;

	/* Only copy the address if the PF positively acknowledged it */
	if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
		ether_addr_copy(hw->mac.perm_addr, addr);

	hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];

	return 0;
}
145
146
147
148
149
150
/**
 * ixgbevf_hv_reset_hw_vf - reset via Hyper-V
 * @hw: pointer to private hardware struct
 *
 * Hyper-V variant; there is no mailbox communication. The permanent MAC
 * is instead read byte-by-byte from PCI config space, which requires
 * CONFIG_PCI_MMCONFIG.
 */
static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
{
#if IS_ENABLED(CONFIG_PCI_MMCONFIG)
	struct ixgbevf_adapter *adapter = hw->back;
	int i;

	/* Pull the six MAC bytes Hyper-V publishes in config space */
	for (i = 0; i < 6; i++)
		pci_read_config_byte(adapter->pdev,
				     (i + IXGBE_HV_RESET_OFFSET),
				     &hw->mac.perm_addr[i]);
	return 0;
#else
	pr_err("PCI_MMCONFIG needs to be enabled for Hyper-V\n");
	return -EOPNOTSUPP;
#endif
}
167
168
169
170
171
172
173
174
175
176
/**
 * ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
 * @hw: pointer to hardware structure
 *
 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
 * disables transmit and receive units.
 */
static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
{
	u32 number_of_queues;
	u32 reg_val;
	u16 i;

	/* Set the adapter_stopped flag so other driver functions stop
	 * touching the hardware from this point on
	 */
	hw->adapter_stopped = true;

	/* Disable the receive unit by stopping each queue */
	number_of_queues = hw->mac.max_rx_queues;
	for (i = 0; i < number_of_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		if (reg_val & IXGBE_RXDCTL_ENABLE) {
			reg_val &= ~IXGBE_RXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
		}
	}

	IXGBE_WRITE_FLUSH(hw);

	/* Clear interrupt mask to stop from interrupts being generated */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);

	/* Clear any pending interrupts */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	/* Disable the transmit unit.  Each queue must be disabled. */
	number_of_queues = hw->mac.max_tx_queues;
	for (i = 0; i < number_of_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		if (reg_val & IXGBE_TXDCTL_ENABLE) {
			reg_val &= ~IXGBE_TXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
		}
	}

	return 0;
}
218
219
220
221
222
223
224
225
226
227
228
229
230
231static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
232{
233 u32 vector = 0;
234
235 switch (hw->mac.mc_filter_type) {
236 case 0:
237 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
238 break;
239 case 1:
240 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
241 break;
242 case 2:
243 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
244 break;
245 case 3:
246 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
247 break;
248 default:
249 break;
250 }
251
252
253 vector &= 0xFFF;
254 return vector;
255}
256
257
258
259
260
261
/**
 * ixgbevf_get_mac_addr_vf - Read device MAC address
 * @hw: pointer to the HW structure
 * @mac_addr: pointer to storage for retrieved MAC address
 *
 * Copies the cached permanent MAC address; no hardware access is needed.
 */
static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
{
	ether_addr_copy(mac_addr, hw->mac.perm_addr);

	return 0;
}
268
269static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
270{
271 u32 msgbuf[3], msgbuf_chk;
272 u8 *msg_addr = (u8 *)(&msgbuf[1]);
273 s32 ret_val;
274
275 memset(msgbuf, 0, sizeof(msgbuf));
276
277
278
279
280
281 msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
282 msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
283 msgbuf_chk = msgbuf[0];
284
285 if (addr)
286 ether_addr_copy(msg_addr, addr);
287
288 ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
289 ARRAY_SIZE(msgbuf));
290 if (!ret_val) {
291 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
292
293 if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_NACK))
294 return -ENOMEM;
295 }
296
297 return ret_val;
298}
299
/* Hyper-V variant - secondary unicast addresses are not supported. */
static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
{
	return -EOPNOTSUPP;
}
304
305
306
307
308
309
310
311
312
313
314
315
/**
 * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents
 * @hw: pointer to hardware structure
 * @reta: buffer to fill with RETA contents
 * @num_rx_queues: number of Rx queues configured for this port
 *
 * Caller must hold the mailbox lock.
 *
 * Returns: 0 on success, -EOPNOTSUPP if the API/device doesn't support the
 * operation, -EPERM if the PF refused it, or IXGBE_ERR_MBX on an
 * unexpected reply.
 */
int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
{
	int err, i, j;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	u32 *hw_reta = &msgbuf[1];
	u32 mask = 0;

	/* The RETA is transferred as packed 2-bit entries: 16 entries per
	 * 32-bit dword, so the full table needs RETA_SIZE / 16 dwords.
	 */
	int dwords = IXGBEVF_82599_RETA_SIZE / 16;

	/* RETA querying over the mailbox is only used for devices older
	 * than X550 (newer VFs access the RETA directly), and only with
	 * API 1.2 or later.
	 */
	switch (hw->api_version) {
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_12:
		if (hw->mac.type < ixgbe_mac_X550_vf)
			break;
		/* fall through */
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_GET_RETA;

	err = hw->mbx.ops.write_posted(hw, msgbuf, 1);

	if (err)
		return err;

	err = hw->mbx.ops.read_posted(hw, msgbuf, dwords + 1);

	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* If the operation has been refused by a PF return -EPERM */
	if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK))
		return -EPERM;

	/* If we didn't get an ACK there must have been
	 * some sort of mailbox error so we should treat it
	 * as such.
	 */
	if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_ACK))
		return IXGBE_ERR_MBX;

	/* With one Rx queue every entry maps to queue 0, so mask the
	 * 2-bit entries down to 0; with more queues keep the low bit.
	 */
	if (num_rx_queues > 1)
		mask = 0x1;

	/* Unpack sixteen 2-bit entries from each dword */
	for (i = 0; i < dwords; i++)
		for (j = 0; j < 16; j++)
			reta[i * 16 + j] = (hw_reta[i] >> (2 * j)) & mask;

	return 0;
}
379
380
381
382
383
384
385
386
387
388
389
/**
 * ixgbevf_get_rss_key_locked - get the RSS Random Key
 * @hw: pointer to the HW structure
 * @rss_key: buffer to fill with the RSS key (IXGBEVF_RSS_HASH_KEY_SIZE bytes)
 *
 * Caller must hold the mailbox lock.
 *
 * Returns: 0 on success, -EOPNOTSUPP if the API/device doesn't support the
 * operation, -EPERM if the PF refused it, or IXGBE_ERR_MBX on an
 * unexpected reply.
 */
int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
{
	int err;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];

	/* RSS key querying over the mailbox is only used for devices older
	 * than X550 (newer VFs read the key directly), and only with API
	 * 1.2 or later.
	 */
	switch (hw->api_version) {
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_12:
		if (hw->mac.type < ixgbe_mac_X550_vf)
			break;
		/* fall through */
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_GET_RSS_KEY;
	err = hw->mbx.ops.write_posted(hw, msgbuf, 1);

	if (err)
		return err;

	/* 11 dwords = 1 command word + the key payload (presumably
	 * IXGBEVF_RSS_HASH_KEY_SIZE == 40 bytes, i.e. 10 dwords - TODO
	 * confirm against the mbox header).
	 */
	err = hw->mbx.ops.read_posted(hw, msgbuf, 11);

	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* If the operation has been refused by a PF return -EPERM */
	if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_NACK))
		return -EPERM;

	/* If we didn't get an ACK there must have been
	 * some sort of mailbox error so we should treat it
	 * as such.
	 */
	if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_ACK))
		return IXGBE_ERR_MBX;

	memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE);

	return 0;
}
439
440
441
442
443
444
445
446
447static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
448 u32 vmdq)
449{
450 u32 msgbuf[3];
451 u8 *msg_addr = (u8 *)(&msgbuf[1]);
452 s32 ret_val;
453
454 memset(msgbuf, 0, sizeof(msgbuf));
455 msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
456 ether_addr_copy(msg_addr, addr);
457
458 ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
459 ARRAY_SIZE(msgbuf));
460 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
461
462
463 if (!ret_val &&
464 (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) {
465 ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
466 return IXGBE_ERR_MBX;
467 }
468
469 return ret_val;
470}
471
472
473
474
475
476
477
478
479
480
481
482
/**
 * ixgbevf_hv_set_rar_vf - set device MAC address, Hyper-V variant
 * @hw: pointer to hardware structure
 * @index: receive address register index (unused)
 * @addr: address to check against the assigned permanent address
 * @vmdq: unused
 *
 * We don't have the ability to change the MAC under Hyper-V; succeed only
 * if the requested address already matches the one we were assigned.
 */
static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
				 u32 vmdq)
{
	if (ether_addr_equal(addr, hw->mac.perm_addr))
		return 0;

	return -EOPNOTSUPP;
}
491
492
493
494
495
496
497
498
/**
 * ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
 * @hw: pointer to the HW structure
 * @netdev: pointer to net device structure
 *
 * Updates the Multicast Table Array by sending hashed multicast vectors
 * to the PF over the mailbox.
 */
static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
					  struct net_device *netdev)
{
	struct netdev_hw_addr *ha;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	u16 *vector_list = (u16 *)&msgbuf[1];
	u32 cnt, i;

	/* Each entry in the list uses 1 16-bit word.  The message buffer
	 * holds 30 such words after the command word, so cap the count at
	 * 30 and silently drop any extra addresses.
	 */
	cnt = netdev_mc_count(netdev);
	if (cnt > 30)
		cnt = 30;
	msgbuf[0] = IXGBE_VF_SET_MULTICAST;
	msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;

	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == cnt)
			break;
		/* Link-local addresses are filtered out, not forwarded.
		 * NOTE(review): skipping them here does not reduce the cnt
		 * advertised in msgbuf[0], so trailing vector_list slots may
		 * be uninitialized stack data - confirm the PF tolerates
		 * this.
		 */
		if (is_link_local_ether_addr(ha->addr))
			continue;

		vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
	}

	ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, IXGBE_VFMAILBOX_SIZE);

	return 0;
}
536
537
538
539
540
541
/* Hyper-V variant - just a stub; multicast list updates are not supported. */
static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
					     struct net_device *netdev)
{
	return -EOPNOTSUPP;
}
547
548
549
550
551
552
553
554
/**
 * ixgbevf_update_xcast_mode - Update Multicast/Unicast promiscuous mode
 * @hw: pointer to the HW structure
 * @xcast_mode: new multicast mode
 *
 * Updates the Multicast/Unicast promiscuous mode via the PF mailbox.
 * Returns -EOPNOTSUPP for unsupported API/mode combinations and -EPERM
 * if the PF refused the request.
 */
static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
	u32 msgbuf[2];
	s32 err;

	switch (hw->api_version) {
	case ixgbe_mbox_api_12:
		/* promisc introduced in 1.3 version */
		if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
			return -EOPNOTSUPP;
		/* fall through */
	case ixgbe_mbox_api_13:
		break;
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
	msgbuf[1] = xcast_mode;

	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					 ARRAY_SIZE(msgbuf));
	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
	if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
		return -EPERM;

	return 0;
}
586
587
588
589
590
591
/* Hyper-V variant - just a stub; promiscuous mode changes are not supported. */
static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
	return -EOPNOTSUPP;
}
596
597
598
599
600
601
602
603
604static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
605 bool vlan_on)
606{
607 u32 msgbuf[2];
608 s32 err;
609
610 msgbuf[0] = IXGBE_VF_SET_VLAN;
611 msgbuf[1] = vlan;
612
613 msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
614
615 err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
616 ARRAY_SIZE(msgbuf));
617 if (err)
618 goto mbx_err;
619
620
621 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
622 msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);
623
624 if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_ACK))
625 err = IXGBE_ERR_INVALID_ARGUMENT;
626
627mbx_err:
628 return err;
629}
630
631
632
633
634
635
636
637
/* Hyper-V variant - just a stub; VLAN filter updates are not supported. */
static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
				  bool vlan_on)
{
	return -EOPNOTSUPP;
}
643
644
645
646
647
648
649
650
651
652
653
/**
 * ixgbevf_setup_mac_link_vf - Setup MAC link settings
 * @hw: pointer to hardware structure
 * @speed: unused
 * @autoneg: unused
 * @autoneg_wait_to_complete: unused
 *
 * The VF cannot influence link settings - they are owned by the PF - so
 * this is intentionally a no-op kept for the mac_ops interface.
 */
static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
				     ixgbe_link_speed speed, bool autoneg,
				     bool autoneg_wait_to_complete)
{
	return 0;
}
660
661
662
663
664
665
666
667
668
669
/**
 * ixgbevf_check_mac_link_vf - Get link/speed status
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @link_up: true is link is up, false otherwise
 * @autoneg_wait_to_complete: unused
 *
 * Reads the links register to determine if link is up and the current speed,
 * then verifies the PF is still responsive via the mailbox.
 */
static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
				     ixgbe_link_speed *speed,
				     bool *link_up,
				     bool autoneg_wait_to_complete)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	struct ixgbe_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u32 links_reg;
	u32 in_msg = 0;

	/* If we were hit with a reset drop the link */
	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
		mac->get_link_status = true;

	if (!mac->get_link_status)
		goto out;

	/* if link status is down no point in checking to see if pf is up */
	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	if (!(links_reg & IXGBE_LINKS_UP))
		goto out;

	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
	 * before the link status is correct
	 */
	if (mac->type == ixgbe_mac_82599_vf) {
		int i;

		for (i = 0; i < 5; i++) {
			udelay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

			if (!(links_reg & IXGBE_LINKS_UP))
				goto out;
		}
	}

	/* Translate the LINKS speed field into the link-speed enum */
	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	}

	/* if the read failed it could just be a mailbox collision, best wait
	 * until we are called again and don't report an error
	 */
	if (mbx->ops.read(hw, &in_msg, 1))
		goto out;

	if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
		/* msg is not CTS and is NACK we must have lost CTS status */
		if (in_msg & IXGBE_VT_MSGTYPE_NACK)
			ret_val = -1;
		goto out;
	}

	/* the pf is talking, if we timed out in the past we reuse the old
	 * report
	 */
	if (!mbx->timeout) {
		ret_val = -1;
		goto out;
	}

	/* if we passed all the tests above then the link is up and we no
	 * longer need to check for link
	 */
	mac->get_link_status = false;

out:
	*link_up = !mac->get_link_status;
	return ret_val;
}
748
749
750
751
752
753
754
755
/**
 * ixgbevf_hv_check_mac_link_vf - check link
 * @hw: pointer to private hardware struct
 * @speed: pointer to link speed
 * @link_up: true is link is up, false otherwise
 * @autoneg_wait_to_complete: unused
 *
 * Hyper-V variant; there is no mailbox communication, so link state is
 * derived from the VFLINKS register alone.
 */
static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
					ixgbe_link_speed *speed,
					bool *link_up,
					bool autoneg_wait_to_complete)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	struct ixgbe_mac_info *mac = &hw->mac;
	u32 links_reg;

	/* If we were hit with a reset drop the link */
	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
		mac->get_link_status = true;

	if (!mac->get_link_status)
		goto out;

	/* if link status is down no point in checking to see if pf is up */
	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	if (!(links_reg & IXGBE_LINKS_UP))
		goto out;

	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
	 * before the link status is correct
	 */
	if (mac->type == ixgbe_mac_82599_vf) {
		int i;

		for (i = 0; i < 5; i++) {
			udelay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

			if (!(links_reg & IXGBE_LINKS_UP))
				goto out;
		}
	}

	/* Translate the LINKS speed field into the link-speed enum */
	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	}

	/* if we passed all the tests above then the link is up and we no
	 * longer need to check for link
	 */
	mac->get_link_status = false;

out:
	*link_up = !mac->get_link_status;
	return 0;
}
813
814
815
816
817
818
819static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
820{
821 u32 msgbuf[2];
822 s32 ret_val;
823
824 msgbuf[0] = IXGBE_VF_SET_LPE;
825 msgbuf[1] = max_size;
826
827 ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
828 ARRAY_SIZE(msgbuf));
829 if (ret_val)
830 return ret_val;
831 if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
832 (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK))
833 return IXGBE_ERR_MBX;
834
835 return 0;
836}
837
838
839
840
841
842
843
/**
 * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length
 * @hw: pointer to the HW structure
 * @max_size: value to assign to max frame size
 *
 * Hyper-V variant: program the RLPML field of VFRXDCTL(0) directly since
 * there is no mailbox communication.
 */
static s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
{
	u32 reg;

	/* NOTE(review): the new length is ORed into the register without
	 * clearing any previously programmed RLPML bits - confirm this is
	 * intended (the field can only grow across calls).
	 */
	reg = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0));
	/* CRC == 4 */
	reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN);
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg);

	return 0;
}
858
859
860
861
862
863
864static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
865{
866 int err;
867 u32 msg[3];
868
869
870 msg[0] = IXGBE_VF_API_NEGOTIATE;
871 msg[1] = api;
872 msg[2] = 0;
873
874 err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
875 if (!err) {
876 msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
877
878
879 if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
880 hw->api_version = api;
881 return 0;
882 }
883
884 err = IXGBE_ERR_INVALID_ARGUMENT;
885 }
886
887 return err;
888}
889
890
891
892
893
894
895
/**
 * ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version
 * @hw: unused
 * @api: integer containing requested API version
 *
 * Hyper-V only supports the base mailbox API (1.0); reject anything else.
 */
static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
{
	/* Hyper-V only supports api version ixgbe_mbox_api_10 */
	if (api != ixgbe_mbox_api_10)
		return IXGBE_ERR_INVALID_ARGUMENT;

	return 0;
}
904
/**
 * ixgbevf_get_queues - Fetch queue configuration from the PF
 * @hw: pointer to the HW structure
 * @num_tcs: number of traffic classes reported by the PF
 * @default_tc: the default queue/traffic class
 *
 * Queries the PF for the Tx/Rx queue counts and traffic-class setup,
 * clamping the reported values to the driver maximums. A no-op (returning
 * 0 and leaving the outputs untouched) for API versions that do not
 * support the GET_QUEUE message.
 */
int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
		       unsigned int *default_tc)
{
	int err;
	u32 msg[5];

	/* do nothing if API doesn't support ixgbevf_get_queues */
	switch (hw->api_version) {
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
	case ixgbe_mbox_api_13:
		break;
	default:
		return 0;
	}

	/* Fetch queue configuration from the PF */
	msg[0] = IXGBE_VF_GET_QUEUE;
	msg[1] = msg[2] = msg[3] = msg[4] = 0;

	err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
	if (!err) {
		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;

		/* if we didn't get an ACK there must have been
		 * some sort of mailbox error so we should treat it
		 * as such
		 */
		if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_ACK))
			return IXGBE_ERR_MBX;

		/* record and validate values from message */
		hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
		if (hw->mac.max_tx_queues == 0 ||
		    hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
			hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;

		hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
		if (hw->mac.max_rx_queues == 0 ||
		    hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
			hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;

		*num_tcs = msg[IXGBE_VF_TRANS_VLAN];
		/* in case of unknown state assume we cannot tag frames */
		if (*num_tcs > hw->mac.max_rx_queues)
			*num_tcs = 1;

		*default_tc = msg[IXGBE_VF_DEF_QUEUE];
		/* default to queue 0 on out-of-bounds queue number */
		if (*default_tc >= hw->mac.max_tx_queues)
			*default_tc = 0;
	}

	return err;
}
960
/* MAC operations for mailbox-based VFs (82599/X540/X550 families). */
static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
	.init_hw		= ixgbevf_init_hw_vf,
	.reset_hw		= ixgbevf_reset_hw_vf,
	.start_hw		= ixgbevf_start_hw_vf,
	.get_mac_addr		= ixgbevf_get_mac_addr_vf,
	.stop_adapter		= ixgbevf_stop_hw_vf,
	.setup_link		= ixgbevf_setup_mac_link_vf,
	.check_link		= ixgbevf_check_mac_link_vf,
	.negotiate_api_version	= ixgbevf_negotiate_api_version_vf,
	.set_rar		= ixgbevf_set_rar_vf,
	.update_mc_addr_list	= ixgbevf_update_mc_addr_list_vf,
	.update_xcast_mode	= ixgbevf_update_xcast_mode,
	.set_uc_addr		= ixgbevf_set_uc_addr_vf,
	.set_vfta		= ixgbevf_set_vfta_vf,
	.set_rlpml		= ixgbevf_set_rlpml_vf,
};
977
/* MAC operations for Hyper-V hosted VFs; mailbox-dependent callbacks are
 * replaced with _hv_ variants (mostly stubs or direct register access).
 */
static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = {
	.init_hw		= ixgbevf_init_hw_vf,
	.reset_hw		= ixgbevf_hv_reset_hw_vf,
	.start_hw		= ixgbevf_start_hw_vf,
	.get_mac_addr		= ixgbevf_get_mac_addr_vf,
	.stop_adapter		= ixgbevf_stop_hw_vf,
	.setup_link		= ixgbevf_setup_mac_link_vf,
	.check_link		= ixgbevf_hv_check_mac_link_vf,
	.negotiate_api_version	= ixgbevf_hv_negotiate_api_version_vf,
	.set_rar		= ixgbevf_hv_set_rar_vf,
	.update_mc_addr_list	= ixgbevf_hv_update_mc_addr_list_vf,
	.update_xcast_mode	= ixgbevf_hv_update_xcast_mode,
	.set_uc_addr		= ixgbevf_hv_set_uc_addr_vf,
	.set_vfta		= ixgbevf_hv_set_vfta_vf,
	.set_rlpml		= ixgbevf_hv_set_rlpml_vf,
};
994
/* Per-device info: each entry binds a MAC type to its operations table.
 * The plain entries use the mailbox ops; the _hv_ entries use the
 * Hyper-V ops.
 */
const struct ixgbevf_info ixgbevf_82599_vf_info = {
	.mac = ixgbe_mac_82599_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_82599_vf_hv_info = {
	.mac = ixgbe_mac_82599_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X540_vf_info = {
	.mac = ixgbe_mac_X540_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X540_vf_hv_info = {
	.mac = ixgbe_mac_X540_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550_vf_info = {
	.mac = ixgbe_mac_X550_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550_vf_hv_info = {
	.mac = ixgbe_mac_X550_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
	.mac = ixgbe_mac_X550EM_x_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = {
	.mac = ixgbe_mac_X550EM_x_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_x550em_a_vf_info = {
	.mac = ixgbe_mac_x550em_a_vf,
	.mac_ops = &ixgbevf_mac_ops,
};
1039