1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28#include "vf.h"
29#include "ixgbevf.h"
30
31
32
33
34
35
36
37
38
39
40static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
41{
42
43 hw->adapter_stopped = false;
44
45 return 0;
46}
47
48
49
50
51
52
53
54
55static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
56{
57 s32 status = hw->mac.ops.start_hw(hw);
58
59 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
60
61 return status;
62}
63
64
65
66
67
68
69
70
71static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
72{
73 struct ixgbe_mbx_info *mbx = &hw->mbx;
74 u32 timeout = IXGBE_VF_INIT_TIMEOUT;
75 s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
76 u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
77 u8 *addr = (u8 *)(&msgbuf[1]);
78
79
80 hw->mac.ops.stop_adapter(hw);
81
82
83 hw->api_version = ixgbe_mbox_api_10;
84
85 IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
86 IXGBE_WRITE_FLUSH(hw);
87
88
89 while (!mbx->ops.check_for_rst(hw) && timeout) {
90 timeout--;
91 udelay(5);
92 }
93
94 if (!timeout)
95 return IXGBE_ERR_RESET_FAILED;
96
97
98 mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
99
100 msgbuf[0] = IXGBE_VF_RESET;
101 mbx->ops.write_posted(hw, msgbuf, 1);
102
103 mdelay(10);
104
105
106
107
108 ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
109 if (ret_val)
110 return ret_val;
111
112
113
114
115
116 if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
117 msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
118 return IXGBE_ERR_INVALID_MAC_ADDR;
119
120 memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
121 hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
122
123 return 0;
124}
125
126
127
128
129
130
131
132
133
134
135static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
136{
137 u32 number_of_queues;
138 u32 reg_val;
139 u16 i;
140
141
142
143
144
145 hw->adapter_stopped = true;
146
147
148 number_of_queues = hw->mac.max_rx_queues;
149 for (i = 0; i < number_of_queues; i++) {
150 reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
151 if (reg_val & IXGBE_RXDCTL_ENABLE) {
152 reg_val &= ~IXGBE_RXDCTL_ENABLE;
153 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
154 }
155 }
156
157 IXGBE_WRITE_FLUSH(hw);
158
159
160 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
161
162
163 IXGBE_READ_REG(hw, IXGBE_VTEICR);
164
165
166 number_of_queues = hw->mac.max_tx_queues;
167 for (i = 0; i < number_of_queues; i++) {
168 reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
169 if (reg_val & IXGBE_TXDCTL_ENABLE) {
170 reg_val &= ~IXGBE_TXDCTL_ENABLE;
171 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
172 }
173 }
174
175 return 0;
176}
177
178
179
180
181
182
183
184
185
186
187
188
189
190static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
191{
192 u32 vector = 0;
193
194 switch (hw->mac.mc_filter_type) {
195 case 0:
196 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
197 break;
198 case 1:
199 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
200 break;
201 case 2:
202 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
203 break;
204 case 3:
205 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
206 break;
207 default:
208 break;
209 }
210
211
212 vector &= 0xFFF;
213 return vector;
214}
215
216
217
218
219
220
221static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
222{
223 memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN);
224
225 return 0;
226}
227
228static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
229{
230 struct ixgbe_mbx_info *mbx = &hw->mbx;
231 u32 msgbuf[3];
232 u8 *msg_addr = (u8 *)(&msgbuf[1]);
233 s32 ret_val;
234
235 memset(msgbuf, 0, sizeof(msgbuf));
236
237
238
239
240
241
242 msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
243 msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
244 if (addr)
245 memcpy(msg_addr, addr, 6);
246 ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
247
248 if (!ret_val)
249 ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
250
251 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
252
253 if (!ret_val)
254 if (msgbuf[0] ==
255 (IXGBE_VF_SET_MACVLAN | IXGBE_VT_MSGTYPE_NACK))
256 ret_val = -ENOMEM;
257
258 return ret_val;
259}
260
261
262
263
264
265
266
267
268static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
269 u32 vmdq)
270{
271 struct ixgbe_mbx_info *mbx = &hw->mbx;
272 u32 msgbuf[3];
273 u8 *msg_addr = (u8 *)(&msgbuf[1]);
274 s32 ret_val;
275
276 memset(msgbuf, 0, sizeof(msgbuf));
277 msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
278 memcpy(msg_addr, addr, 6);
279 ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
280
281 if (!ret_val)
282 ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
283
284 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
285
286
287 if (!ret_val &&
288 (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK)))
289 ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
290
291 return ret_val;
292}
293
294static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw,
295 u32 *msg, u16 size)
296{
297 struct ixgbe_mbx_info *mbx = &hw->mbx;
298 u32 retmsg[IXGBE_VFMAILBOX_SIZE];
299 s32 retval = mbx->ops.write_posted(hw, msg, size);
300
301 if (!retval)
302 mbx->ops.read_posted(hw, retmsg, size);
303}
304
305
306
307
308
309
310
311
/**
 *  ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
 *  @hw: pointer to the HW structure
 *  @netdev: pointer to net device structure
 *
 *  Sends the netdev's multicast list to the PF as 12-bit MTA vectors.
 *  The message layout is: word 0 = IXGBE_VF_SET_MULTICAST with the
 *  entry count in the info field; the u16 vectors are packed starting
 *  at word 1.  At most 30 entries fit in the mailbox message.
 **/
static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
					  struct net_device *netdev)
{
	struct netdev_hw_addr *ha;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	/* vectors are 16-bit values overlaid on the u32 words after word 0 */
	u16 *vector_list = (u16 *)&msgbuf[1];
	u32 cnt, i;

	/* Clamp to the 30 vectors that fit in one mailbox message;
	 * any further addresses are silently dropped.
	 */
	cnt = netdev_mc_count(netdev);
	if (cnt > 30)
		cnt = 30;
	msgbuf[0] = IXGBE_VF_SET_MULTICAST;
	msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;

	/* NOTE(review): cnt counts all mc addresses, but link-local
	 * addresses below are skipped without decrementing it, so the
	 * trailing vector_list slots (and msgbuf beyond them) stay
	 * uninitialized while the header still advertises cnt entries —
	 * verify the PF side tolerates this.
	 */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == cnt)
			break;
		if (is_link_local_ether_addr(ha->addr))
			continue;

		vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
	}

	/* best-effort: the reply is read and discarded */
	ixgbevf_write_msg_read_ack(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);

	return 0;
}
349
350
351
352
353
354
355
356
357static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
358 bool vlan_on)
359{
360 struct ixgbe_mbx_info *mbx = &hw->mbx;
361 u32 msgbuf[2];
362 s32 err;
363
364 msgbuf[0] = IXGBE_VF_SET_VLAN;
365 msgbuf[1] = vlan;
366
367 msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
368
369 err = mbx->ops.write_posted(hw, msgbuf, 2);
370 if (err)
371 goto mbx_err;
372
373 err = mbx->ops.read_posted(hw, msgbuf, 2);
374 if (err)
375 goto mbx_err;
376
377
378 msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
379 msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);
380
381 if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_ACK))
382 err = IXGBE_ERR_INVALID_ARGUMENT;
383
384mbx_err:
385 return err;
386}
387
388
389
390
391
392
393
394
395
396
397
398static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
399 ixgbe_link_speed speed, bool autoneg,
400 bool autoneg_wait_to_complete)
401{
402 return 0;
403}
404
405
406
407
408
409
410
411
412
413
/**
 *  ixgbevf_check_mac_link_vf - Get link/speed status
 *  @hw: pointer to hardware structure
 *  @speed: pointer to link speed (set only when VFLINKS reports link up)
 *  @link_up: true is link is up, false otherwise
 *  @autoneg_wait_to_complete: unused in this implementation
 *
 *  Reads the VFLINKS register to determine if link is up and the current
 *  speed, then probes the mailbox to make sure the PF has not reset or
 *  gone away.  mac->get_link_status stays true (link reported down)
 *  until every check passes.  Returns -1 when the PF NACKs or the
 *  mailbox has timed out.
 **/
static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
				     ixgbe_link_speed *speed,
				     bool *link_up,
				     bool autoneg_wait_to_complete)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	struct ixgbe_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u32 links_reg;
	u32 in_msg = 0;

	/* If we were hit with a reset (check_for_rst reports one, or the
	 * mailbox timeout is zero) drop the link and re-evaluate.
	 */
	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
		mac->get_link_status = true;

	if (!mac->get_link_status)
		goto out;

	/* if link status is down no point in checking to see if pf is up */
	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	if (!(links_reg & IXGBE_LINKS_UP))
		goto out;

	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	}

	/* non-blocking mailbox read; if it fails assume we cannot yet
	 * report link up and try again later
	 */
	if (mbx->ops.read(hw, &in_msg, 1))
		goto out;

	if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
		/* msg is not CTS and is NACK we must have lost CTS status */
		if (in_msg & IXGBE_VT_MSGTYPE_NACK)
			ret_val = -1;
		goto out;
	}

	/* the PF is talking, but the mailbox timed out earlier: report the
	 * error and keep link down until things recover
	 */
	if (!mbx->timeout) {
		ret_val = -1;
		goto out;
	}

	/* if we passed all the tests above then the link is up and we no
	 * longer need to check for link
	 */
	mac->get_link_status = false;

out:
	*link_up = !mac->get_link_status;
	return ret_val;
}
475
476
477
478
479
480
481void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
482{
483 u32 msgbuf[2];
484
485 msgbuf[0] = IXGBE_VF_SET_LPE;
486 msgbuf[1] = max_size;
487 ixgbevf_write_msg_read_ack(hw, msgbuf, 2);
488}
489
490
491
492
493
494
495int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
496{
497 int err;
498 u32 msg[3];
499
500
501 msg[0] = IXGBE_VF_API_NEGOTIATE;
502 msg[1] = api;
503 msg[2] = 0;
504 err = hw->mbx.ops.write_posted(hw, msg, 3);
505
506 if (!err)
507 err = hw->mbx.ops.read_posted(hw, msg, 3);
508
509 if (!err) {
510 msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
511
512
513 if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
514 hw->api_version = api;
515 return 0;
516 }
517
518 err = IXGBE_ERR_INVALID_ARGUMENT;
519 }
520
521 return err;
522}
523
524int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
525 unsigned int *default_tc)
526{
527 int err;
528 u32 msg[5];
529
530
531 switch (hw->api_version) {
532 case ixgbe_mbox_api_11:
533 break;
534 default:
535 return 0;
536 }
537
538
539 msg[0] = IXGBE_VF_GET_QUEUE;
540 msg[1] = msg[2] = msg[3] = msg[4] = 0;
541 err = hw->mbx.ops.write_posted(hw, msg, 5);
542
543 if (!err)
544 err = hw->mbx.ops.read_posted(hw, msg, 5);
545
546 if (!err) {
547 msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
548
549
550
551
552
553
554 if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_ACK))
555 return IXGBE_ERR_MBX;
556
557
558 hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
559 if (hw->mac.max_tx_queues == 0 ||
560 hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
561 hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
562
563 hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
564 if (hw->mac.max_rx_queues == 0 ||
565 hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
566 hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
567
568 *num_tcs = msg[IXGBE_VF_TRANS_VLAN];
569
570 if (*num_tcs > hw->mac.max_rx_queues)
571 *num_tcs = 1;
572
573 *default_tc = msg[IXGBE_VF_DEF_QUEUE];
574
575 if (*default_tc >= hw->mac.max_tx_queues)
576 *default_tc = 0;
577 }
578
579 return err;
580}
581
/* MAC operations table shared by all ixgbevf MAC types; every entry
 * points at one of the VF implementations above.
 */
static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
	.init_hw = ixgbevf_init_hw_vf,
	.reset_hw = ixgbevf_reset_hw_vf,
	.start_hw = ixgbevf_start_hw_vf,
	.get_mac_addr = ixgbevf_get_mac_addr_vf,
	.stop_adapter = ixgbevf_stop_hw_vf,
	.setup_link = ixgbevf_setup_mac_link_vf,
	.check_link = ixgbevf_check_mac_link_vf,
	.set_rar = ixgbevf_set_rar_vf,
	.update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
	.set_uc_addr = ixgbevf_set_uc_addr_vf,
	.set_vfta = ixgbevf_set_vfta_vf,
};
595
/* Device info for the 82599 VF: MAC type plus the shared ops table */
const struct ixgbevf_info ixgbevf_82599_vf_info = {
	.mac = ixgbe_mac_82599_vf,
	.mac_ops = &ixgbevf_mac_ops,
};
600
/* Device info for the X540 VF: MAC type plus the shared ops table */
const struct ixgbevf_info ixgbevf_X540_vf_info = {
	.mac = ixgbe_mac_X540_vf,
	.mac_ops = &ixgbevf_mac_ops,
};
605