1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27#include "vf.h"
28#include "ixgbevf.h"
29
30
31
32
33
34
35
36
37
38
39static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
40{
41
42 hw->adapter_stopped = false;
43
44 return 0;
45}
46
47
48
49
50
51
52
53
54static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
55{
56 s32 status = hw->mac.ops.start_hw(hw);
57
58 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
59
60 return status;
61}
62
63
64
65
66
67
68
69
70static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
71{
72 struct ixgbe_mbx_info *mbx = &hw->mbx;
73 u32 timeout = IXGBE_VF_INIT_TIMEOUT;
74 s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
75 u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
76 u8 *addr = (u8 *)(&msgbuf[1]);
77
78
79 hw->mac.ops.stop_adapter(hw);
80
81
82 hw->api_version = ixgbe_mbox_api_10;
83
84 IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
85 IXGBE_WRITE_FLUSH(hw);
86
87
88 while (!mbx->ops.check_for_rst(hw) && timeout) {
89 timeout--;
90 udelay(5);
91 }
92
93 if (!timeout)
94 return IXGBE_ERR_RESET_FAILED;
95
96
97 mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
98
99 msgbuf[0] = IXGBE_VF_RESET;
100 mbx->ops.write_posted(hw, msgbuf, 1);
101
102 mdelay(10);
103
104
105
106
107
108 ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
109 if (ret_val)
110 return ret_val;
111
112
113
114
115
116 if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
117 msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
118 return IXGBE_ERR_INVALID_MAC_ADDR;
119
120 ether_addr_copy(hw->mac.perm_addr, addr);
121 hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
122
123 return 0;
124}
125
126
127
128
129
130
131
132
133
134
135static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
136{
137 u32 number_of_queues;
138 u32 reg_val;
139 u16 i;
140
141
142
143
144 hw->adapter_stopped = true;
145
146
147 number_of_queues = hw->mac.max_rx_queues;
148 for (i = 0; i < number_of_queues; i++) {
149 reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
150 if (reg_val & IXGBE_RXDCTL_ENABLE) {
151 reg_val &= ~IXGBE_RXDCTL_ENABLE;
152 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
153 }
154 }
155
156 IXGBE_WRITE_FLUSH(hw);
157
158
159 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
160
161
162 IXGBE_READ_REG(hw, IXGBE_VTEICR);
163
164
165 number_of_queues = hw->mac.max_tx_queues;
166 for (i = 0; i < number_of_queues; i++) {
167 reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
168 if (reg_val & IXGBE_TXDCTL_ENABLE) {
169 reg_val &= ~IXGBE_TXDCTL_ENABLE;
170 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
171 }
172 }
173
174 return 0;
175}
176
177
178
179
180
181
182
183
184
185
186
187
188
189static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
190{
191 u32 vector = 0;
192
193 switch (hw->mac.mc_filter_type) {
194 case 0:
195 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
196 break;
197 case 1:
198 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
199 break;
200 case 2:
201 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
202 break;
203 case 3:
204 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
205 break;
206 default:
207 break;
208 }
209
210
211 vector &= 0xFFF;
212 return vector;
213}
214
215
216
217
218
219
220static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
221{
222 ether_addr_copy(mac_addr, hw->mac.perm_addr);
223
224 return 0;
225}
226
227static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
228{
229 struct ixgbe_mbx_info *mbx = &hw->mbx;
230 u32 msgbuf[3];
231 u8 *msg_addr = (u8 *)(&msgbuf[1]);
232 s32 ret_val;
233
234 memset(msgbuf, 0, sizeof(msgbuf));
235
236
237
238
239
240 msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
241 msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
242 if (addr)
243 ether_addr_copy(msg_addr, addr);
244 ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
245
246 if (!ret_val)
247 ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
248
249 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
250
251 if (!ret_val)
252 if (msgbuf[0] ==
253 (IXGBE_VF_SET_MACVLAN | IXGBE_VT_MSGTYPE_NACK))
254 ret_val = -ENOMEM;
255
256 return ret_val;
257}
258
259
260
261
262
263
264
265
266
267
268
269
/**
 * ixgbevf_get_reta_locked - read the RSS redirection table from the PF
 * @hw: pointer to hardware structure
 * @reta: buffer to fill with the redirection table, one entry per element
 * @num_rx_queues: number of Rx queues in use by the VF
 *
 * Returns 0 on success, -EOPNOTSUPP when the API/MAC combination does not
 * support the query, -EPERM when the PF refuses the request, or
 * IXGBE_ERR_MBX / a mailbox error code on protocol failure.
 *
 * NOTE(review): the "_locked" suffix suggests the caller must hold the
 * mailbox lock — confirm at the call sites.
 */
int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
{
	int err, i, j;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	u32 *hw_reta = &msgbuf[1];
	u32 mask = 0;

	/* Each mailbox dword packs 16 two-bit RETA entries, so the reply
	 * carries IXGBEVF_82599_RETA_SIZE / 16 payload dwords.
	 */
	int dwords = IXGBEVF_82599_RETA_SIZE / 16;

	/* Only mailbox API 1.2 on pre-X550 VF MACs supports this query */
	if (hw->api_version != ixgbe_mbox_api_12 ||
	    hw->mac.type >= ixgbe_mac_X550_vf)
		return -EOPNOTSUPP;

	msgbuf[0] = IXGBE_VF_GET_RETA;

	err = hw->mbx.ops.write_posted(hw, msgbuf, 1);

	if (err)
		return err;

	err = hw->mbx.ops.read_posted(hw, msgbuf, dwords + 1);

	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* The PF refused the request */
	if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK))
		return -EPERM;

	/* If we didn't get an ACK there must have been some sort of
	 * mailbox error, so treat it as such.
	 */
	if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_ACK))
		return IXGBE_ERR_MBX;

	/* With more than one Rx queue keep bit 0 of each entry;
	 * otherwise every entry maps to queue 0 (mask stays 0).
	 */
	if (num_rx_queues > 1)
		mask = 0x1;

	/* Unpack 16 two-bit entries from each reply dword */
	for (i = 0; i < dwords; i++)
		for (j = 0; j < 16; j++)
			reta[i * 16 + j] = (hw_reta[i] >> (2 * j)) & mask;

	return 0;
}
327
328
329
330
331
332
333
334
335
336
337
338int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
339{
340 int err;
341 u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
342
343
344
345
346
347
348
349 if (hw->api_version != ixgbe_mbox_api_12 ||
350 hw->mac.type >= ixgbe_mac_X550_vf)
351 return -EOPNOTSUPP;
352
353 msgbuf[0] = IXGBE_VF_GET_RSS_KEY;
354 err = hw->mbx.ops.write_posted(hw, msgbuf, 1);
355
356 if (err)
357 return err;
358
359 err = hw->mbx.ops.read_posted(hw, msgbuf, 11);
360
361 if (err)
362 return err;
363
364 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
365
366
367 if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK))
368 return -EPERM;
369
370
371
372
373
374 if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_ACK))
375 return IXGBE_ERR_MBX;
376
377 memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE);
378
379 return 0;
380}
381
382
383
384
385
386
387
388
389static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
390 u32 vmdq)
391{
392 struct ixgbe_mbx_info *mbx = &hw->mbx;
393 u32 msgbuf[3];
394 u8 *msg_addr = (u8 *)(&msgbuf[1]);
395 s32 ret_val;
396
397 memset(msgbuf, 0, sizeof(msgbuf));
398 msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
399 ether_addr_copy(msg_addr, addr);
400 ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
401
402 if (!ret_val)
403 ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
404
405 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
406
407
408 if (!ret_val &&
409 (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK)))
410 ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
411
412 return ret_val;
413}
414
415static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw,
416 u32 *msg, u16 size)
417{
418 struct ixgbe_mbx_info *mbx = &hw->mbx;
419 u32 retmsg[IXGBE_VFMAILBOX_SIZE];
420 s32 retval = mbx->ops.write_posted(hw, msg, size);
421
422 if (!retval)
423 mbx->ops.read_posted(hw, retmsg, size);
424}
425
426
427
428
429
430
431
432
/**
 * ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
 * @hw: pointer to the HW structure
 * @netdev: net device whose multicast list is sent to the PF
 *
 * Packs the 12-bit hash vector of each (non link-local) multicast
 * address into a mailbox message and sends it to the PF.
 * Always returns 0.
 */
static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
					  struct net_device *netdev)
{
	struct netdev_hw_addr *ha;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	u16 *vector_list = (u16 *)&msgbuf[1];
	u32 cnt, i;

	/* Each list entry uses one 16-bit word and the first dword
	 * holds the message type, leaving room for 30 hash vectors.
	 * Any addresses beyond 30 are silently dropped.
	 */
	cnt = netdev_mc_count(netdev);
	if (cnt > 30)
		cnt = 30;
	msgbuf[0] = IXGBE_VF_SET_MULTICAST;
	msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;

	/* NOTE(review): cnt is advertised in msgbuf[0] before link-local
	 * addresses are filtered out below, so when any are skipped the
	 * advertised count exceeds the vectors actually written and the
	 * trailing entries are uninitialized stack words — confirm the
	 * PF tolerates this.
	 */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == cnt)
			break;
		if (is_link_local_ether_addr(ha->addr))
			continue;

		vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
	}

	ixgbevf_write_msg_read_ack(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);

	return 0;
}
470
471
472
473
474
475
476
477
478static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
479 bool vlan_on)
480{
481 struct ixgbe_mbx_info *mbx = &hw->mbx;
482 u32 msgbuf[2];
483 s32 err;
484
485 msgbuf[0] = IXGBE_VF_SET_VLAN;
486 msgbuf[1] = vlan;
487
488 msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
489
490 err = mbx->ops.write_posted(hw, msgbuf, 2);
491 if (err)
492 goto mbx_err;
493
494 err = mbx->ops.read_posted(hw, msgbuf, 2);
495 if (err)
496 goto mbx_err;
497
498
499 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
500 msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);
501
502 if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_ACK))
503 err = IXGBE_ERR_INVALID_ARGUMENT;
504
505mbx_err:
506 return err;
507}
508
509
510
511
512
513
514
515
516
517
518
/**
 * ixgbevf_setup_mac_link_vf - Setup MAC link settings
 * @hw: pointer to hardware structure
 * @speed: unused
 * @autoneg: unused
 * @autoneg_wait_to_complete: unused
 *
 * Intentionally a no-op: the PF controls the physical link, so the VF
 * has nothing to configure here.  Always returns 0.
 */
static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
				     ixgbe_link_speed speed, bool autoneg,
				     bool autoneg_wait_to_complete)
{
	return 0;
}
525
526
527
528
529
530
531
532
533
534
/**
 * ixgbevf_check_mac_link_vf - Get link/speed status
 * @hw: pointer to hardware structure
 * @speed: filled with the current link speed
 * @link_up: filled with true when link is up
 * @autoneg_wait_to_complete: unused
 *
 * Reads the VFLINKS register to determine link state and speed, then
 * peeks at the mailbox to make sure the PF is still responsive before
 * declaring link up.  Returns 0 normally, -1 when the PF appears to be
 * resetting or unreachable.
 */
static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
				     ixgbe_link_speed *speed,
				     bool *link_up,
				     bool autoneg_wait_to_complete)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	struct ixgbe_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u32 links_reg;
	u32 in_msg = 0;

	/* If the PF signalled a reset, or the mailbox has timed out,
	 * force a fresh link check.
	 */
	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
		mac->get_link_status = true;

	if (!mac->get_link_status)
		goto out;

	/* If link is not up there is nothing more to check */
	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	if (!(links_reg & IXGBE_LINKS_UP))
		goto out;

	/* On 82599 VFs re-read the register several times; if the up
	 * bit drops on any read, report link down.
	 * NOTE(review): presumably this lets a flapping link settle
	 * before it is trusted — confirm against the 82599 datasheet.
	 */
	if (mac->type == ixgbe_mac_82599_vf) {
		int i;

		for (i = 0; i < 5; i++) {
			udelay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

			if (!(links_reg & IXGBE_LINKS_UP))
				goto out;
		}
	}

	/* Decode the speed bits of VFLINKS */
	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	}

	/* A failed (non-blocking) mailbox read may just be a collision;
	 * keep get_link_status set and try again on the next call.
	 */
	if (mbx->ops.read(hw, &in_msg, 1))
		goto out;

	if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
		/* A NACK without CTS means the PF is not ready for us */
		if (in_msg & IXGBE_VT_MSGTYPE_NACK)
			ret_val = -1;
		goto out;
	}

	/* A zero mailbox timeout means the PF stopped responding */
	if (!mbx->timeout) {
		ret_val = -1;
		goto out;
	}

	/* All checks passed: the link is up and need not be re-checked
	 * until the next reset/timeout event.
	 */
	mac->get_link_status = false;

out:
	*link_up = !mac->get_link_status;
	return ret_val;
}
613
614
615
616
617
618
619void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
620{
621 u32 msgbuf[2];
622
623 msgbuf[0] = IXGBE_VF_SET_LPE;
624 msgbuf[1] = max_size;
625 ixgbevf_write_msg_read_ack(hw, msgbuf, 2);
626}
627
628
629
630
631
632
633int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
634{
635 int err;
636 u32 msg[3];
637
638
639 msg[0] = IXGBE_VF_API_NEGOTIATE;
640 msg[1] = api;
641 msg[2] = 0;
642 err = hw->mbx.ops.write_posted(hw, msg, 3);
643
644 if (!err)
645 err = hw->mbx.ops.read_posted(hw, msg, 3);
646
647 if (!err) {
648 msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
649
650
651 if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
652 hw->api_version = api;
653 return 0;
654 }
655
656 err = IXGBE_ERR_INVALID_ARGUMENT;
657 }
658
659 return err;
660}
661
/**
 * ixgbevf_get_queues - Fetch queue configuration from the PF
 * @hw: pointer to the HW structure
 * @num_tcs: filled with the number of traffic classes reported
 * @default_tc: filled with the default queue/traffic class
 *
 * Queries the PF for the Tx/Rx queue counts and TC layout and stores
 * sanitized values in hw->mac and the output parameters.  Returns 0 on
 * success (including on pre-1.1 APIs, where nothing is queried),
 * IXGBE_ERR_MBX when the PF does not ACK, or a mailbox error code.
 */
int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
		       unsigned int *default_tc)
{
	int err;
	u32 msg[5];

	/* The queue query only exists on mailbox API 1.1 and newer;
	 * on older APIs report success and leave the defaults alone.
	 */
	switch (hw->api_version) {
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
		break;
	default:
		return 0;
	}

	/* Fetch queue configuration from the PF */
	msg[0] = IXGBE_VF_GET_QUEUE;
	msg[1] = msg[2] = msg[3] = msg[4] = 0;
	err = hw->mbx.ops.write_posted(hw, msg, 5);

	if (!err)
		err = hw->mbx.ops.read_posted(hw, msg, 5);

	if (!err) {
		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;

		/* If we didn't get an ACK there must have been some sort
		 * of mailbox error, so treat it as such.
		 */
		if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_ACK))
			return IXGBE_ERR_MBX;

		/* Clamp the reported Tx queue count to a sane range */
		hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
		if (hw->mac.max_tx_queues == 0 ||
		    hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
			hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;

		/* Clamp the reported Rx queue count to a sane range */
		hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
		if (hw->mac.max_rx_queues == 0 ||
		    hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
			hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;

		*num_tcs = msg[IXGBE_VF_TRANS_VLAN];
		/* in case of unknown state assume we cannot tag frames */
		if (*num_tcs > hw->mac.max_rx_queues)
			*num_tcs = 1;

		*default_tc = msg[IXGBE_VF_DEF_QUEUE];
		/* default to queue 0 on out-of-bounds queue number */
		if (*default_tc >= hw->mac.max_tx_queues)
			*default_tc = 0;
	}

	return err;
}
719
/* MAC operations table shared by every supported VF MAC type below */
static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
	.init_hw = ixgbevf_init_hw_vf,
	.reset_hw = ixgbevf_reset_hw_vf,
	.start_hw = ixgbevf_start_hw_vf,
	.get_mac_addr = ixgbevf_get_mac_addr_vf,
	.stop_adapter = ixgbevf_stop_hw_vf,
	.setup_link = ixgbevf_setup_mac_link_vf,
	.check_link = ixgbevf_check_mac_link_vf,
	.set_rar = ixgbevf_set_rar_vf,
	.update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
	.set_uc_addr = ixgbevf_set_uc_addr_vf,
	.set_vfta = ixgbevf_set_vfta_vf,
};
733
/* Per-device info blocks: each pairs a VF MAC type with the common ops */
const struct ixgbevf_info ixgbevf_82599_vf_info = {
	.mac = ixgbe_mac_82599_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X540_vf_info = {
	.mac = ixgbe_mac_X540_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550_vf_info = {
	.mac = ixgbe_mac_X550_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
	.mac = ixgbe_mac_X550EM_x_vf,
	.mac_ops = &ixgbevf_mac_ops,
};
753