1
2
3
4#include "vf.h"
5#include "ixgbevf.h"
6
7
8
9
10
11#define IXGBE_HV_RESET_OFFSET 0x201
12
13static inline s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg,
14 u32 *retmsg, u16 size)
15{
16 struct ixgbe_mbx_info *mbx = &hw->mbx;
17 s32 retval = mbx->ops.write_posted(hw, msg, size);
18
19 if (retval)
20 return retval;
21
22 return mbx->ops.read_posted(hw, retmsg, size);
23}
24
25
26
27
28
29
30
31
32
33
/**
 * ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * The VF has no per-start hardware programming to perform; it only clears
 * the adapter_stopped flag so the rest of the driver knows the adapter is
 * running again.
 */
static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
{
	/* Clear adapter stopped flag */
	hw->adapter_stopped = false;

	return 0;
}
41
42
43
44
45
46
47
48
49static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
50{
51 s32 status = hw->mac.ops.start_hw(hw);
52
53 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
54
55 return status;
56}
57
58
59
60
61
62
63
64
/**
 * ixgbevf_reset_hw_vf - Performs hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by issuing a function-level reset through VFCTRL,
 * waiting for the PF to acknowledge the reset, and then requesting the
 * permanent MAC address (and multicast filter type) over the mailbox.
 */
static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	u32 timeout = IXGBE_VF_INIT_TIMEOUT;
	s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
	u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* Call adapter stop to disable tx/rx and clear interrupts */
	hw->mac.ops.stop_adapter(hw);

	/* reset the api version back to the base 1.0 before renegotiation */
	hw->api_version = ixgbe_mbox_api_10;

	/* issue the VF reset and make sure it reached the hardware */
	IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
	IXGBE_WRITE_FLUSH(hw);

	/* we cannot reset while the RSTI / RSTD bits are asserted */
	while (!mbx->ops.check_for_rst(hw) && timeout) {
		timeout--;
		udelay(5);
	}

	if (!timeout)
		return IXGBE_ERR_RESET_FAILED;

	/* mailbox timeout can now become active */
	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;

	msgbuf[0] = IXGBE_VF_RESET;
	mbx->ops.write_posted(hw, msgbuf, 1);

	mdelay(10);

	/* set our "perm_addr" based on info provided by PF
	 * also set up the mc_filter_type which is piggy backed
	 * on the mac address in word 3
	 */
	ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
	if (ret_val)
		return ret_val;

	/* New versions of the PF may NACK the reset return message
	 * to indicate that no MAC address has yet been assigned for
	 * the VF.
	 */
	if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
	    msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
		return IXGBE_ERR_INVALID_MAC_ADDR;

	/* only copy the address out on an ACK; on NACK leave it untouched */
	if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
		ether_addr_copy(hw->mac.perm_addr, addr);

	hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];

	return 0;
}
122
123
124
125
126
127
/**
 * ixgbevf_hv_reset_hw_vf - reset via config space (Hyper-V variant)
 * @hw: pointer to private hardware struct
 *
 * Hyper-V variant: the PF mailbox is not available, so the permanent MAC
 * address is read one byte at a time directly from the VF's PCI config
 * space at IXGBE_HV_RESET_OFFSET.  Requires CONFIG_PCI_MMCONFIG.
 */
static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
{
#if IS_ENABLED(CONFIG_PCI_MMCONFIG)
	struct ixgbevf_adapter *adapter = hw->back;
	int i;

	/* 6 == bytes in an Ethernet address */
	for (i = 0; i < 6; i++)
		pci_read_config_byte(adapter->pdev,
				     (i + IXGBE_HV_RESET_OFFSET),
				     &hw->mac.perm_addr[i]);
	return 0;
#else
	pr_err("PCI_MMCONFIG needs to be enabled for Hyper-V\n");
	return -EOPNOTSUPP;
#endif
}
144
145
146
147
148
149
150
151
152
153
/**
 * ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
 * @hw: pointer to hardware structure
 *
 * Sets the adapter_stopped flag within the ixgbe_hw struct, disables each
 * receive queue, masks and flushes pending interrupts, then disables each
 * transmit queue.
 */
static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
{
	u32 number_of_queues;
	u32 reg_val;
	u16 i;

	/* Set the adapter_stopped flag so other driver functions stop
	 * touching the hardware
	 */
	hw->adapter_stopped = true;

	/* Disable the receive unit by stopping each queue */
	number_of_queues = hw->mac.max_rx_queues;
	for (i = 0; i < number_of_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		if (reg_val & IXGBE_RXDCTL_ENABLE) {
			reg_val &= ~IXGBE_RXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
		}
	}

	IXGBE_WRITE_FLUSH(hw);

	/* Clear interrupt mask to stop from interrupts being generated */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);

	/* Clear any pending interrupts */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	/* Disable the transmit unit.  Each queue must be disabled. */
	number_of_queues = hw->mac.max_tx_queues;
	for (i = 0; i < number_of_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		if (reg_val & IXGBE_TXDCTL_ENABLE) {
			reg_val &= ~IXGBE_TXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
		}
	}

	return 0;
}
195
196
197
198
199
200
201
202
203
204
205
206
207
208static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
209{
210 u32 vector = 0;
211
212 switch (hw->mac.mc_filter_type) {
213 case 0:
214 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
215 break;
216 case 1:
217 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
218 break;
219 case 2:
220 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
221 break;
222 case 3:
223 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
224 break;
225 default:
226 break;
227 }
228
229
230 vector &= 0xFFF;
231 return vector;
232}
233
234
235
236
237
238
/**
 * ixgbevf_get_mac_addr_vf - Read device MAC address
 * @hw: pointer to the HW structure
 * @mac_addr: buffer that receives the cached permanent MAC address
 */
static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
{
	ether_addr_copy(mac_addr, hw->mac.perm_addr);

	return 0;
}
245
246static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
247{
248 u32 msgbuf[3], msgbuf_chk;
249 u8 *msg_addr = (u8 *)(&msgbuf[1]);
250 s32 ret_val;
251
252 memset(msgbuf, 0, sizeof(msgbuf));
253
254
255
256
257
258 msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
259 msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
260 msgbuf_chk = msgbuf[0];
261
262 if (addr)
263 ether_addr_copy(msg_addr, addr);
264
265 ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
266 ARRAY_SIZE(msgbuf));
267 if (!ret_val) {
268 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
269
270 if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_NACK))
271 return -ENOMEM;
272 }
273
274 return ret_val;
275}
276
/* Hyper-V variant - just a stub; unicast filters cannot be set without
 * the PF mailbox.
 */
static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
{
	return -EOPNOTSUPP;
}
281
282
283
284
285
286
287
288
289
290
291
292
/**
 * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents
 * @hw: pointer to hardware structure
 * @reta: buffer to fill with RETA contents
 * @num_rx_queues: number of Rx queues configured for this port
 *
 * Queries the RETA from the PF over the mailbox.  The mailbox lock is
 * assumed to be held by the caller (hence "_locked").
 *
 * Returns: 0 on success; -EOPNOTSUPP if the negotiated API or device type
 * does not support the query; -EPERM if the PF refused; IXGBE_ERR_MBX on
 * an unexpected reply.
 */
int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
{
	int err, i, j;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	u32 *hw_reta = &msgbuf[1];
	u32 mask = 0;

	/* The PF compresses 16 RETA entries into each returned DWORD,
	 * 2 bits per entry, so only RETA_SIZE/16 payload dwords are read.
	 */
	int dwords = IXGBEVF_82599_RETA_SIZE / 16;

	/* RETA querying is only supported on mailbox API 1.2 and newer,
	 * and only for pre-X550 VF MAC types (newer MACs fall through to
	 * the default and are rejected).
	 */
	switch (hw->api_version) {
	case ixgbe_mbox_api_14:
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_12:
		if (hw->mac.type < ixgbe_mac_X550_vf)
			break;
		/* fall through */
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_GET_RETA;

	err = hw->mbx.ops.write_posted(hw, msgbuf, 1);

	if (err)
		return err;

	err = hw->mbx.ops.read_posted(hw, msgbuf, dwords + 1);

	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* If the operation has been refused by a PF return -EPERM */
	if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK))
		return -EPERM;

	/* If we didn't get an ACK there must have been
	 * some sort of mailbox error so we should treat it
	 * as such.
	 */
	if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_ACK))
		return IXGBE_ERR_MBX;

	/* with a single Rx queue every entry maps to queue 0 (mask 0) */
	if (num_rx_queues > 1)
		mask = 0x1;

	/* unpack 16 two-bit entries out of each returned dword */
	for (i = 0; i < dwords; i++)
		for (j = 0; j < 16; j++)
			reta[i * 16 + j] = (hw_reta[i] >> (2 * j)) & mask;

	return 0;
}
357
358
359
360
361
362
363
364
365
366
367
/**
 * ixgbevf_get_rss_key_locked - get the RSS Random Key
 * @hw: pointer to the HW structure
 * @rss_key: buffer to fill with the RSS key
 *           (must be at least IXGBEVF_RSS_HASH_KEY_SIZE bytes)
 *
 * Queries the RSS hash key from the PF over the mailbox.  The mailbox
 * lock is assumed to be held by the caller (hence "_locked").
 *
 * Returns: 0 on success; -EOPNOTSUPP if the negotiated API or device type
 * does not support the query; -EPERM if the PF refused; IXGBE_ERR_MBX on
 * an unexpected reply.
 */
int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
{
	int err;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];

	/* RSS key querying is only supported on mailbox API 1.2 and newer,
	 * and only for pre-X550 VF MAC types (newer MACs fall through to
	 * the default and are rejected).
	 */
	switch (hw->api_version) {
	case ixgbe_mbox_api_14:
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_12:
		if (hw->mac.type < ixgbe_mac_X550_vf)
			break;
		/* fall through */
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_GET_RSS_KEY;
	err = hw->mbx.ops.write_posted(hw, msgbuf, 1);

	if (err)
		return err;

	/* reply is the header dword plus 10 dwords of key material */
	err = hw->mbx.ops.read_posted(hw, msgbuf, 11);

	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* If the operation has been refused by a PF return -EPERM */
	if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_NACK))
		return -EPERM;

	/* If we didn't get an ACK there must have been
	 * some sort of mailbox error so we should treat it
	 * as such.
	 */
	if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_ACK))
		return IXGBE_ERR_MBX;

	memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE);

	return 0;
}
418
419
420
421
422
423
424
425
426static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
427 u32 vmdq)
428{
429 u32 msgbuf[3];
430 u8 *msg_addr = (u8 *)(&msgbuf[1]);
431 s32 ret_val;
432
433 memset(msgbuf, 0, sizeof(msgbuf));
434 msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
435 ether_addr_copy(msg_addr, addr);
436
437 ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
438 ARRAY_SIZE(msgbuf));
439 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
440
441
442 if (!ret_val &&
443 (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) {
444 ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
445 return IXGBE_ERR_MBX;
446 }
447
448 return ret_val;
449}
450
451
452
453
454
455
456
457
458
459
460
461
462static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
463 u32 vmdq)
464{
465 if (ether_addr_equal(addr, hw->mac.perm_addr))
466 return 0;
467
468 return -EOPNOTSUPP;
469}
470
471
472
473
474
475
476
477
478static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
479 struct net_device *netdev)
480{
481 struct netdev_hw_addr *ha;
482 u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
483 u16 *vector_list = (u16 *)&msgbuf[1];
484 u32 cnt, i;
485
486
487
488
489
490
491
492
493
494
495 cnt = netdev_mc_count(netdev);
496 if (cnt > 30)
497 cnt = 30;
498 msgbuf[0] = IXGBE_VF_SET_MULTICAST;
499 msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
500
501 i = 0;
502 netdev_for_each_mc_addr(ha, netdev) {
503 if (i == cnt)
504 break;
505 if (is_link_local_ether_addr(ha->addr))
506 continue;
507
508 vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
509 }
510
511 ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, IXGBE_VFMAILBOX_SIZE);
512
513 return 0;
514}
515
516
517
518
519
520
/* Hyper-V variant - just a stub; multicast list updates require the PF
 * mailbox, which is unavailable here.
 */
static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
					     struct net_device *netdev)
{
	return -EOPNOTSUPP;
}
526
527
528
529
530
531
532
533
/**
 * ixgbevf_update_xcast_mode - Update Multicast/unicast promiscuous mode
 * @hw: pointer to the HW structure
 * @xcast_mode: new multicast/unicast mode
 *
 * Updates the multicast/unicast promiscuous mode state of the VF via the
 * PF mailbox.  Requires mailbox API 1.2 or newer; promiscuous mode
 * additionally requires API 1.3 or newer.
 */
static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
	u32 msgbuf[2];
	s32 err;

	switch (hw->api_version) {
	case ixgbe_mbox_api_12:
		/* promisc introduced in 1.3 version */
		if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
			return -EOPNOTSUPP;
		/* fall through */
	case ixgbe_mbox_api_14:
	case ixgbe_mbox_api_13:
		break;
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
	msgbuf[1] = xcast_mode;

	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					 ARRAY_SIZE(msgbuf));
	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
	/* a NACK means the PF administratively refused the mode change */
	if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
		return -EPERM;

	return 0;
}
566
567
568
569
570
571
/* Hyper-V variant - just a stub; xcast mode changes require the PF
 * mailbox, which is unavailable here.
 */
static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
	return -EOPNOTSUPP;
}
576
577
578
579
580
581
582
583
584static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
585 bool vlan_on)
586{
587 u32 msgbuf[2];
588 s32 err;
589
590 msgbuf[0] = IXGBE_VF_SET_VLAN;
591 msgbuf[1] = vlan;
592
593 msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
594
595 err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
596 ARRAY_SIZE(msgbuf));
597 if (err)
598 goto mbx_err;
599
600
601 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
602 msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);
603
604 if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_ACK))
605 err = IXGBE_ERR_INVALID_ARGUMENT;
606
607mbx_err:
608 return err;
609}
610
611
612
613
614
615
616
617
/* Hyper-V variant - just a stub; VLAN filter changes require the PF
 * mailbox, which is unavailable here.
 */
static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
				  bool vlan_on)
{
	return -EOPNOTSUPP;
}
623
624
625
626
627
628
629
630
631
632
633
/**
 * ixgbevf_setup_mac_link_vf - Setup MAC link settings
 * @hw: pointer to hardware structure
 * @speed: unused
 * @autoneg: unused
 * @autoneg_wait_to_complete: unused
 *
 * Deliberately does nothing and returns success: VFs cannot change link
 * settings, which are owned by the PF.  Kept for interface compatibility.
 */
static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
				     ixgbe_link_speed speed, bool autoneg,
				     bool autoneg_wait_to_complete)
{
	return 0;
}
640
641
642
643
644
645
646
647
648
649
/**
 * ixgbevf_check_mac_link_vf - Get link/speed status
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @link_up: true if link is up, false otherwise
 * @autoneg_wait_to_complete: unused in this implementation
 *
 * Reads the VFLINKS register to determine link state and speed, then uses
 * the mailbox to verify the PF has not reset or lost contact with the VF.
 * Returns 0 on success, -1 when the PF signalled a NACK or the mailbox
 * has timed out.
 */
static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
				     ixgbe_link_speed *speed,
				     bool *link_up,
				     bool autoneg_wait_to_complete)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	struct ixgbe_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u32 links_reg;
	u32 in_msg = 0;

	/* If we were hit with a reset drop the link */
	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
		mac->get_link_status = true;

	if (!mac->get_link_status)
		goto out;

	/* if link status is down no point in checking to see if pf is up */
	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	if (!(links_reg & IXGBE_LINKS_UP))
		goto out;

	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
	 * before the link status is correct
	 */
	if (mac->type == ixgbe_mac_82599_vf) {
		int i;

		for (i = 0; i < 5; i++) {
			udelay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

			if (!(links_reg & IXGBE_LINKS_UP))
				goto out;
		}
	}

	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	}

	/* if the read failed it could just be a mailbox collision, best wait
	 * until we are called again and don't report an error
	 */
	if (mbx->ops.read(hw, &in_msg, 1))
		goto out;

	if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
		/* msg is not CTS and is NACK we must have lost CTS status */
		if (in_msg & IXGBE_VT_MSGTYPE_NACK)
			ret_val = -1;
		goto out;
	}

	/* the pf is talking, if we timed out in the past we reuse the old
	 * message
	 */
	if (!mbx->timeout) {
		ret_val = -1;
		goto out;
	}

	/* if we passed all the tests above then the link is up and we no
	 * longer need to check for link
	 */
	mac->get_link_status = false;

out:
	*link_up = !mac->get_link_status;
	return ret_val;
}
728
729
730
731
732
733
734
735
/**
 * ixgbevf_hv_check_mac_link_vf - check link (Hyper-V variant)
 * @hw: pointer to private hardware struct
 * @speed: pointer to link speed
 * @link_up: true if link is up, false otherwise
 * @autoneg_wait_to_complete: unused
 *
 * Hyper-V variant: identical register-based link detection but with no
 * mailbox round-trip to the PF (the mailbox is unavailable), so this
 * always returns 0.
 */
static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
					ixgbe_link_speed *speed,
					bool *link_up,
					bool autoneg_wait_to_complete)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	struct ixgbe_mac_info *mac = &hw->mac;
	u32 links_reg;

	/* If we were hit with a reset drop the link */
	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
		mac->get_link_status = true;

	if (!mac->get_link_status)
		goto out;

	/* if link status is down no point in checking to see if pf is up */
	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	if (!(links_reg & IXGBE_LINKS_UP))
		goto out;

	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
	 * before the link status is correct
	 */
	if (mac->type == ixgbe_mac_82599_vf) {
		int i;

		for (i = 0; i < 5; i++) {
			udelay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

			if (!(links_reg & IXGBE_LINKS_UP))
				goto out;
		}
	}

	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	}

	/* if we passed all the tests above then the link is up and we no
	 * longer need to check for link
	 */
	mac->get_link_status = false;

out:
	*link_up = !mac->get_link_status;
	return 0;
}
793
794
795
796
797
798
799static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
800{
801 u32 msgbuf[2];
802 s32 ret_val;
803
804 msgbuf[0] = IXGBE_VF_SET_LPE;
805 msgbuf[1] = max_size;
806
807 ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
808 ARRAY_SIZE(msgbuf));
809 if (ret_val)
810 return ret_val;
811 if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
812 (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK))
813 return IXGBE_ERR_MBX;
814
815 return 0;
816}
817
818
819
820
821
822
823
/**
 * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length
 * @hw: pointer to the HW structure
 * @max_size: value to assign to max frame size
 *
 * Hyper-V variant: there is no mailbox communication, so the RLPML field
 * of VFRXDCTL(0) is programmed directly.
 * NOTE(review): the new length is OR-ed into the register without first
 * clearing any previous RLPML value — assumes the field starts at zero or
 * only ever grows; confirm against the 82599 datasheet.
 */
static s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
{
	u32 reg;

	/* If we are on Hyper-V, we implement this functionality
	 * differently.
	 */
	reg = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0));
	/* +4 presumably accounts for the frame CRC — TODO confirm */
	reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN);
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg);

	return 0;
}
838
839
840
841
842
843
844static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
845{
846 int err;
847 u32 msg[3];
848
849
850 msg[0] = IXGBE_VF_API_NEGOTIATE;
851 msg[1] = api;
852 msg[2] = 0;
853
854 err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
855 if (!err) {
856 msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
857
858
859 if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
860 hw->api_version = api;
861 return 0;
862 }
863
864 err = IXGBE_ERR_INVALID_ARGUMENT;
865 }
866
867 return err;
868}
869
870
871
872
873
874
875
876static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
877{
878
879 if (api != ixgbe_mbox_api_10)
880 return IXGBE_ERR_INVALID_ARGUMENT;
881
882 return 0;
883}
884
/**
 * ixgbevf_get_queues - Fetch the VF's queue configuration from the PF
 * @hw: pointer to the HW structure
 * @num_tcs: filled with the number of traffic classes (transparent VLAN)
 * @default_tc: filled with the default queue index
 *
 * Queries max Tx/Rx queue counts, traffic classes and the default queue
 * over the mailbox, clamping each reported value into a sane range.
 * Silently returns 0 (leaving outputs untouched) on mailbox API versions
 * that do not support the query.
 */
int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
		       unsigned int *default_tc)
{
	int err;
	u32 msg[5];

	/* do nothing if API doesn't support ixgbevf_get_queues */
	switch (hw->api_version) {
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_14:
		break;
	default:
		return 0;
	}

	/* Fetch queue configuration from the PF */
	msg[0] = IXGBE_VF_GET_QUEUE;
	msg[1] = msg[2] = msg[3] = msg[4] = 0;

	err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
	if (!err) {
		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;

		/* if we didn't get an ACK there must have been
		 * some sort of mailbox error so we should treat it
		 * as such
		 */
		if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_ACK))
			return IXGBE_ERR_MBX;

		/* record and validate values from message */
		hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
		if (hw->mac.max_tx_queues == 0 ||
		    hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
			hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;

		hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
		if (hw->mac.max_rx_queues == 0 ||
		    hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
			hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;

		*num_tcs = msg[IXGBE_VF_TRANS_VLAN];
		/* in case of unknown state assume we can only get one queue */
		if (*num_tcs > hw->mac.max_rx_queues)
			*num_tcs = 1;

		*default_tc = msg[IXGBE_VF_DEF_QUEUE];
		/* default to queue 0 on out-of-bounds queue number */
		if (*default_tc >= hw->mac.max_tx_queues)
			*default_tc = 0;
	}

	return err;
}
941
/* MAC ops for VFs that talk to the PF over the mailbox */
static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
	.init_hw		= ixgbevf_init_hw_vf,
	.reset_hw		= ixgbevf_reset_hw_vf,
	.start_hw		= ixgbevf_start_hw_vf,
	.get_mac_addr		= ixgbevf_get_mac_addr_vf,
	.stop_adapter		= ixgbevf_stop_hw_vf,
	.setup_link		= ixgbevf_setup_mac_link_vf,
	.check_link		= ixgbevf_check_mac_link_vf,
	.negotiate_api_version	= ixgbevf_negotiate_api_version_vf,
	.set_rar		= ixgbevf_set_rar_vf,
	.update_mc_addr_list	= ixgbevf_update_mc_addr_list_vf,
	.update_xcast_mode	= ixgbevf_update_xcast_mode,
	.set_uc_addr		= ixgbevf_set_uc_addr_vf,
	.set_vfta		= ixgbevf_set_vfta_vf,
	.set_rlpml		= ixgbevf_set_rlpml_vf,
};
958
/* MAC ops for Hyper-V hosted VFs: mailbox-less (_hv_) variants are
 * substituted wherever the standard op needs PF mailbox communication.
 */
static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = {
	.init_hw		= ixgbevf_init_hw_vf,
	.reset_hw		= ixgbevf_hv_reset_hw_vf,
	.start_hw		= ixgbevf_start_hw_vf,
	.get_mac_addr		= ixgbevf_get_mac_addr_vf,
	.stop_adapter		= ixgbevf_stop_hw_vf,
	.setup_link		= ixgbevf_setup_mac_link_vf,
	.check_link		= ixgbevf_hv_check_mac_link_vf,
	.negotiate_api_version	= ixgbevf_hv_negotiate_api_version_vf,
	.set_rar		= ixgbevf_hv_set_rar_vf,
	.update_mc_addr_list	= ixgbevf_hv_update_mc_addr_list_vf,
	.update_xcast_mode	= ixgbevf_hv_update_xcast_mode,
	.set_uc_addr		= ixgbevf_hv_set_uc_addr_vf,
	.set_vfta		= ixgbevf_hv_set_vfta_vf,
	.set_rlpml		= ixgbevf_hv_set_rlpml_vf,
};
975
/* Board descriptors: each supported VF MAC type is paired with its ops
 * table; the _hv_ variants select the Hyper-V (mailbox-less) ops.
 */
const struct ixgbevf_info ixgbevf_82599_vf_info = {
	.mac = ixgbe_mac_82599_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_82599_vf_hv_info = {
	.mac = ixgbe_mac_82599_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X540_vf_info = {
	.mac = ixgbe_mac_X540_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X540_vf_hv_info = {
	.mac = ixgbe_mac_X540_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550_vf_info = {
	.mac = ixgbe_mac_X550_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550_vf_hv_info = {
	.mac = ixgbe_mac_X550_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
	.mac = ixgbe_mac_X550EM_x_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = {
	.mac = ixgbe_mac_X550EM_x_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_x550em_a_vf_info = {
	.mac = ixgbe_mac_x550em_a_vf,
	.mac_ops = &ixgbevf_mac_ops,
};
1020