1
2
3
4#include "iavf.h"
5#include "iavf_prototype.h"
6#include "iavf_client.h"
7
8
9#define IAVF_BUSY_WAIT_DELAY 10
10#define IAVF_BUSY_WAIT_COUNT 50
11
12
13
14
15
16
17
18
19
20
21static int iavf_send_pf_msg(struct iavf_adapter *adapter,
22 enum virtchnl_ops op, u8 *msg, u16 len)
23{
24 struct iavf_hw *hw = &adapter->hw;
25 enum iavf_status err;
26
27 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
28 return 0;
29
30 err = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
31 if (err)
32 dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n",
33 op, iavf_stat_str(hw, err),
34 iavf_aq_str(hw, hw->aq.asq_last_status));
35 return err;
36}
37
38
39
40
41
42
43
44
45
46int iavf_send_api_ver(struct iavf_adapter *adapter)
47{
48 struct virtchnl_version_info vvi;
49
50 vvi.major = VIRTCHNL_VERSION_MAJOR;
51 vvi.minor = VIRTCHNL_VERSION_MINOR;
52
53 return iavf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi,
54 sizeof(vvi));
55}
56
57
58
59
60
61
62
63
64
65
66int iavf_verify_api_ver(struct iavf_adapter *adapter)
67{
68 struct virtchnl_version_info *pf_vvi;
69 struct iavf_hw *hw = &adapter->hw;
70 struct iavf_arq_event_info event;
71 enum virtchnl_ops op;
72 enum iavf_status err;
73
74 event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
75 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
76 if (!event.msg_buf) {
77 err = -ENOMEM;
78 goto out;
79 }
80
81 while (1) {
82 err = iavf_clean_arq_element(hw, &event, NULL);
83
84
85
86 if (err)
87 goto out_alloc;
88 op =
89 (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
90 if (op == VIRTCHNL_OP_VERSION)
91 break;
92 }
93
94
95 err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
96 if (err)
97 goto out_alloc;
98
99 if (op != VIRTCHNL_OP_VERSION) {
100 dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n",
101 op);
102 err = -EIO;
103 goto out_alloc;
104 }
105
106 pf_vvi = (struct virtchnl_version_info *)event.msg_buf;
107 adapter->pf_version = *pf_vvi;
108
109 if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) ||
110 ((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) &&
111 (pf_vvi->minor > VIRTCHNL_VERSION_MINOR)))
112 err = -EIO;
113
114out_alloc:
115 kfree(event.msg_buf);
116out:
117 return err;
118}
119
120
121
122
123
124
125
126
127
128int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
129{
130 u32 caps;
131
132 caps = VIRTCHNL_VF_OFFLOAD_L2 |
133 VIRTCHNL_VF_OFFLOAD_RSS_PF |
134 VIRTCHNL_VF_OFFLOAD_RSS_AQ |
135 VIRTCHNL_VF_OFFLOAD_RSS_REG |
136 VIRTCHNL_VF_OFFLOAD_VLAN |
137 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
138 VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
139 VIRTCHNL_VF_OFFLOAD_ENCAP |
140 VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
141 VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
142 VIRTCHNL_VF_OFFLOAD_ADQ |
143 VIRTCHNL_VF_OFFLOAD_USO |
144 VIRTCHNL_VF_OFFLOAD_FDIR_PF |
145 VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
146 VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
147
148 adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
149 adapter->aq_required &= ~IAVF_FLAG_AQ_GET_CONFIG;
150 if (PF_IS_V11(adapter))
151 return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES,
152 (u8 *)&caps, sizeof(caps));
153 else
154 return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES,
155 NULL, 0);
156}
157
158
159
160
161
162
163
164
165static void iavf_validate_num_queues(struct iavf_adapter *adapter)
166{
167 if (adapter->vf_res->num_queue_pairs > IAVF_MAX_REQ_QUEUES) {
168 struct virtchnl_vsi_resource *vsi_res;
169 int i;
170
171 dev_info(&adapter->pdev->dev, "Received %d queues, but can only have a max of %d\n",
172 adapter->vf_res->num_queue_pairs,
173 IAVF_MAX_REQ_QUEUES);
174 dev_info(&adapter->pdev->dev, "Fixing by reducing queues to %d\n",
175 IAVF_MAX_REQ_QUEUES);
176 adapter->vf_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES;
177 for (i = 0; i < adapter->vf_res->num_vsis; i++) {
178 vsi_res = &adapter->vf_res->vsi_res[i];
179 vsi_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES;
180 }
181 }
182}
183
184
185
186
187
188
189
190
191
192
193int iavf_get_vf_config(struct iavf_adapter *adapter)
194{
195 struct iavf_hw *hw = &adapter->hw;
196 struct iavf_arq_event_info event;
197 enum virtchnl_ops op;
198 enum iavf_status err;
199 u16 len;
200
201 len = sizeof(struct virtchnl_vf_resource) +
202 IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
203 event.buf_len = len;
204 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
205 if (!event.msg_buf) {
206 err = -ENOMEM;
207 goto out;
208 }
209
210 while (1) {
211
212
213
214 err = iavf_clean_arq_element(hw, &event, NULL);
215 if (err)
216 goto out_alloc;
217 op =
218 (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
219 if (op == VIRTCHNL_OP_GET_VF_RESOURCES)
220 break;
221 }
222
223 err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
224 memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len));
225
226
227
228
229 if (!err)
230 iavf_validate_num_queues(adapter);
231 iavf_vf_parse_hw_config(hw, adapter->vf_res);
232out_alloc:
233 kfree(event.msg_buf);
234out:
235 return err;
236}
237
238
239
240
241
242
243
244void iavf_configure_queues(struct iavf_adapter *adapter)
245{
246 struct virtchnl_vsi_queue_config_info *vqci;
247 struct virtchnl_queue_pair_info *vqpi;
248 int pairs = adapter->num_active_queues;
249 int i, max_frame = IAVF_MAX_RXBUFFER;
250 size_t len;
251
252 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
253
254 dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
255 adapter->current_op);
256 return;
257 }
258 adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
259 len = struct_size(vqci, qpair, pairs);
260 vqci = kzalloc(len, GFP_KERNEL);
261 if (!vqci)
262 return;
263
264
265 if (!(adapter->flags & IAVF_FLAG_LEGACY_RX) &&
266 (adapter->netdev->mtu <= ETH_DATA_LEN))
267 max_frame = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
268
269 vqci->vsi_id = adapter->vsi_res->vsi_id;
270 vqci->num_queue_pairs = pairs;
271 vqpi = vqci->qpair;
272
273
274
275 for (i = 0; i < pairs; i++) {
276 vqpi->txq.vsi_id = vqci->vsi_id;
277 vqpi->txq.queue_id = i;
278 vqpi->txq.ring_len = adapter->tx_rings[i].count;
279 vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma;
280 vqpi->rxq.vsi_id = vqci->vsi_id;
281 vqpi->rxq.queue_id = i;
282 vqpi->rxq.ring_len = adapter->rx_rings[i].count;
283 vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
284 vqpi->rxq.max_pkt_size = max_frame;
285 vqpi->rxq.databuffer_size =
286 ALIGN(adapter->rx_rings[i].rx_buf_len,
287 BIT_ULL(IAVF_RXQ_CTX_DBUFF_SHIFT));
288 vqpi++;
289 }
290
291 adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_QUEUES;
292 iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
293 (u8 *)vqci, len);
294 kfree(vqci);
295}
296
297
298
299
300
301
302
303void iavf_enable_queues(struct iavf_adapter *adapter)
304{
305 struct virtchnl_queue_select vqs;
306
307 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
308
309 dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n",
310 adapter->current_op);
311 return;
312 }
313 adapter->current_op = VIRTCHNL_OP_ENABLE_QUEUES;
314 vqs.vsi_id = adapter->vsi_res->vsi_id;
315 vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
316 vqs.rx_queues = vqs.tx_queues;
317 adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_QUEUES;
318 iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES,
319 (u8 *)&vqs, sizeof(vqs));
320}
321
322
323
324
325
326
327
328void iavf_disable_queues(struct iavf_adapter *adapter)
329{
330 struct virtchnl_queue_select vqs;
331
332 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
333
334 dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n",
335 adapter->current_op);
336 return;
337 }
338 adapter->current_op = VIRTCHNL_OP_DISABLE_QUEUES;
339 vqs.vsi_id = adapter->vsi_res->vsi_id;
340 vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
341 vqs.rx_queues = vqs.tx_queues;
342 adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_QUEUES;
343 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES,
344 (u8 *)&vqs, sizeof(vqs));
345}
346
347
348
349
350
351
352
353
354void iavf_map_queues(struct iavf_adapter *adapter)
355{
356 struct virtchnl_irq_map_info *vimi;
357 struct virtchnl_vector_map *vecmap;
358 struct iavf_q_vector *q_vector;
359 int v_idx, q_vectors;
360 size_t len;
361
362 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
363
364 dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n",
365 adapter->current_op);
366 return;
367 }
368 adapter->current_op = VIRTCHNL_OP_CONFIG_IRQ_MAP;
369
370 q_vectors = adapter->num_msix_vectors - NONQ_VECS;
371
372 len = struct_size(vimi, vecmap, adapter->num_msix_vectors);
373 vimi = kzalloc(len, GFP_KERNEL);
374 if (!vimi)
375 return;
376
377 vimi->num_vectors = adapter->num_msix_vectors;
378
379 for (v_idx = 0; v_idx < q_vectors; v_idx++) {
380 q_vector = &adapter->q_vectors[v_idx];
381 vecmap = &vimi->vecmap[v_idx];
382
383 vecmap->vsi_id = adapter->vsi_res->vsi_id;
384 vecmap->vector_id = v_idx + NONQ_VECS;
385 vecmap->txq_map = q_vector->ring_mask;
386 vecmap->rxq_map = q_vector->ring_mask;
387 vecmap->rxitr_idx = IAVF_RX_ITR;
388 vecmap->txitr_idx = IAVF_TX_ITR;
389 }
390
391 vecmap = &vimi->vecmap[v_idx];
392 vecmap->vsi_id = adapter->vsi_res->vsi_id;
393 vecmap->vector_id = 0;
394 vecmap->txq_map = 0;
395 vecmap->rxq_map = 0;
396
397 adapter->aq_required &= ~IAVF_FLAG_AQ_MAP_VECTORS;
398 iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP,
399 (u8 *)vimi, len);
400 kfree(vimi);
401}
402
403
404
405
406
407
408
/**
 * iavf_add_ether_addrs - push pending MAC filter additions to the PF
 * @adapter: adapter structure
 *
 * Walk the MAC filter list under mac_vlan_list_lock and send every
 * filter marked for addition in one VIRTCHNL_OP_ADD_ETH_ADDR request.
 * If more filters are pending than fit in a single admin queue buffer,
 * the aq_required bit is left set so this function gets called again.
 */
void iavf_add_ether_addrs(struct iavf_adapter *adapter)
{
	struct virtchnl_ether_addr_list *veal;
	struct iavf_mac_filter *f;
	int i = 0, count = 0;
	bool more = false;
	size_t len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* First pass: count the filters waiting to be added. */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->add)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;

	len = struct_size(veal, list, count);
	if (len > IAVF_MAX_AQ_BUF_SIZE) {
		/* Clamp to what fits in one AQ buffer; 'more' keeps the
		 * aq_required bit set so the remainder goes out later.
		 */
		dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
		count = (IAVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct virtchnl_ether_addr_list)) /
			sizeof(struct virtchnl_ether_addr);
		len = struct_size(veal, list, count);
		more = true;
	}

	/* GFP_ATOMIC because we hold a BH-disabling spinlock */
	veal = kzalloc(len, GFP_ATOMIC);
	if (!veal) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	veal->vsi_id = adapter->vsi_res->vsi_id;
	veal->num_elements = count;
	/* Second pass: copy the addresses and clear their add flags. */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->add) {
			ether_addr_copy(veal->list[i].addr, f->macaddr);
			i++;
			f->add = false;
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)veal, len);
	kfree(veal);
}
472
473
474
475
476
477
478
/**
 * iavf_del_ether_addrs - push pending MAC filter removals to the PF
 * @adapter: adapter structure
 *
 * Walk the MAC filter list under mac_vlan_list_lock, send every filter
 * marked for removal in one VIRTCHNL_OP_DEL_ETH_ADDR request, and free
 * the corresponding list entries. If more removals are pending than fit
 * in a single admin queue buffer, the aq_required bit is left set so
 * this function gets called again.
 */
void iavf_del_ether_addrs(struct iavf_adapter *adapter)
{
	struct virtchnl_ether_addr_list *veal;
	struct iavf_mac_filter *f, *ftmp;
	int i = 0, count = 0;
	bool more = false;
	size_t len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* First pass: count the filters marked for removal. */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->remove)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;

	len = struct_size(veal, list, count);
	if (len > IAVF_MAX_AQ_BUF_SIZE) {
		/* Clamp to one AQ buffer; 'more' keeps aq_required set so
		 * the remainder is sent on a later pass.
		 */
		dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
		count = (IAVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct virtchnl_ether_addr_list)) /
			sizeof(struct virtchnl_ether_addr);
		len = struct_size(veal, list, count);
		more = true;
	}
	/* GFP_ATOMIC because we hold a BH-disabling spinlock */
	veal = kzalloc(len, GFP_ATOMIC);
	if (!veal) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	veal->vsi_id = adapter->vsi_res->vsi_id;
	veal->num_elements = count;
	/* Second pass: copy addresses out and drop the list entries. */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		if (f->remove) {
			ether_addr_copy(veal->list[i].addr, f->macaddr);
			i++;
			list_del(&f->list);
			kfree(f);
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)veal, len);
	kfree(veal);
}
542
543
544
545
546
547
548
549static void iavf_mac_add_ok(struct iavf_adapter *adapter)
550{
551 struct iavf_mac_filter *f, *ftmp;
552
553 spin_lock_bh(&adapter->mac_vlan_list_lock);
554 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
555 f->is_new_mac = false;
556 }
557 spin_unlock_bh(&adapter->mac_vlan_list_lock);
558}
559
560
561
562
563
564
565
/**
 * iavf_mac_add_reject - the PF rejected our MAC filter additions
 * @adapter: adapter structure
 *
 * Discard every filter that was newly added (is_new_mac), and make sure
 * the netdev's own address never ends up scheduled for removal as a
 * side effect.
 */
static void iavf_mac_add_reject(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct iavf_mac_filter *f, *ftmp;

	spin_lock_bh(&adapter->mac_vlan_list_lock);
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		/* Keep the primary MAC: cancel any pending removal of it. */
		if (f->remove && ether_addr_equal(f->macaddr, netdev->dev_addr))
			f->remove = false;

		/* Filters the PF just rejected are dropped entirely. */
		if (f->is_new_mac) {
			list_del(&f->list);
			kfree(f);
		}
	}
	spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
583
584
585
586
587
588
589
/**
 * iavf_add_vlans - push pending VLAN filter additions to the PF
 * @adapter: adapter structure
 *
 * Walk the VLAN filter list under mac_vlan_list_lock and send every
 * filter marked for addition in one VIRTCHNL_OP_ADD_VLAN request. If
 * more filters are pending than fit in a single admin queue buffer, the
 * aq_required bit is left set so this function gets called again.
 */
void iavf_add_vlans(struct iavf_adapter *adapter)
{
	struct virtchnl_vlan_filter_list *vvfl;
	int len, i = 0, count = 0;
	struct iavf_vlan_filter *f;
	bool more = false;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* First pass: count the filters waiting to be added. */
	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->add)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ADD_VLAN;

	len = sizeof(struct virtchnl_vlan_filter_list) +
	      (count * sizeof(u16));
	if (len > IAVF_MAX_AQ_BUF_SIZE) {
		/* Clamp to one AQ buffer; 'more' keeps aq_required set so
		 * the remainder goes out on a later pass.
		 */
		dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
		count = (IAVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct virtchnl_vlan_filter_list)) /
			sizeof(u16);
		len = sizeof(struct virtchnl_vlan_filter_list) +
		      (count * sizeof(u16));
		more = true;
	}
	/* GFP_ATOMIC because we hold a BH-disabling spinlock */
	vvfl = kzalloc(len, GFP_ATOMIC);
	if (!vvfl) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	vvfl->vsi_id = adapter->vsi_res->vsi_id;
	vvfl->num_elements = count;
	/* Second pass: copy the VLAN IDs and clear their add flags. */
	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->add) {
			vvfl->vlan_id[i] = f->vlan;
			i++;
			f->add = false;
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
	kfree(vvfl);
}
653
654
655
656
657
658
659
/**
 * iavf_del_vlans - push pending VLAN filter removals to the PF
 * @adapter: adapter structure
 *
 * Walk the VLAN filter list under mac_vlan_list_lock, send every filter
 * marked for removal in one VIRTCHNL_OP_DEL_VLAN request, and free the
 * corresponding list entries. If more removals are pending than fit in
 * a single admin queue buffer, the aq_required bit is left set so this
 * function gets called again.
 */
void iavf_del_vlans(struct iavf_adapter *adapter)
{
	struct virtchnl_vlan_filter_list *vvfl;
	struct iavf_vlan_filter *f, *ftmp;
	int len, i = 0, count = 0;
	bool more = false;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n",
			adapter->current_op);
		return;
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* First pass: count the filters marked for removal. */
	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->remove)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_DEL_VLAN;

	len = sizeof(struct virtchnl_vlan_filter_list) +
	      (count * sizeof(u16));
	if (len > IAVF_MAX_AQ_BUF_SIZE) {
		/* Clamp to one AQ buffer; 'more' keeps aq_required set so
		 * the remainder goes out on a later pass.
		 */
		dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
		count = (IAVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct virtchnl_vlan_filter_list)) /
			sizeof(u16);
		len = sizeof(struct virtchnl_vlan_filter_list) +
		      (count * sizeof(u16));
		more = true;
	}
	/* GFP_ATOMIC because we hold a BH-disabling spinlock */
	vvfl = kzalloc(len, GFP_ATOMIC);
	if (!vvfl) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	vvfl->vsi_id = adapter->vsi_res->vsi_id;
	vvfl->num_elements = count;
	/* Second pass: copy the VLAN IDs out and drop the list entries. */
	list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
		if (f->remove) {
			vvfl->vlan_id[i] = f->vlan;
			i++;
			list_del(&f->list);
			kfree(f);
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
	kfree(vvfl);
}
724
725
726
727
728
729
730
731
732void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags)
733{
734 struct virtchnl_promisc_info vpi;
735 int promisc_all;
736
737 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
738
739 dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n",
740 adapter->current_op);
741 return;
742 }
743
744 promisc_all = FLAG_VF_UNICAST_PROMISC |
745 FLAG_VF_MULTICAST_PROMISC;
746 if ((flags & promisc_all) == promisc_all) {
747 adapter->flags |= IAVF_FLAG_PROMISC_ON;
748 adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_PROMISC;
749 dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
750 }
751
752 if (flags & FLAG_VF_MULTICAST_PROMISC) {
753 adapter->flags |= IAVF_FLAG_ALLMULTI_ON;
754 adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_ALLMULTI;
755 dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
756 }
757
758 if (!flags) {
759 adapter->flags &= ~(IAVF_FLAG_PROMISC_ON |
760 IAVF_FLAG_ALLMULTI_ON);
761 adapter->aq_required &= ~(IAVF_FLAG_AQ_RELEASE_PROMISC |
762 IAVF_FLAG_AQ_RELEASE_ALLMULTI);
763 dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
764 }
765
766 adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
767 vpi.vsi_id = adapter->vsi_res->vsi_id;
768 vpi.flags = flags;
769 iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
770 (u8 *)&vpi, sizeof(vpi));
771}
772
773
774
775
776
777
778
779void iavf_request_stats(struct iavf_adapter *adapter)
780{
781 struct virtchnl_queue_select vqs;
782
783 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
784
785 return;
786 }
787 adapter->current_op = VIRTCHNL_OP_GET_STATS;
788 vqs.vsi_id = adapter->vsi_res->vsi_id;
789
790 if (iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS, (u8 *)&vqs,
791 sizeof(vqs)))
792
793 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
794}
795
796
797
798
799
800
801
802void iavf_get_hena(struct iavf_adapter *adapter)
803{
804 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
805
806 dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n",
807 adapter->current_op);
808 return;
809 }
810 adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS;
811 adapter->aq_required &= ~IAVF_FLAG_AQ_GET_HENA;
812 iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS, NULL, 0);
813}
814
815
816
817
818
819
820
821void iavf_set_hena(struct iavf_adapter *adapter)
822{
823 struct virtchnl_rss_hena vrh;
824
825 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
826
827 dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n",
828 adapter->current_op);
829 return;
830 }
831 vrh.hena = adapter->hena;
832 adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA;
833 adapter->aq_required &= ~IAVF_FLAG_AQ_SET_HENA;
834 iavf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA, (u8 *)&vrh,
835 sizeof(vrh));
836}
837
838
839
840
841
842
843
844void iavf_set_rss_key(struct iavf_adapter *adapter)
845{
846 struct virtchnl_rss_key *vrk;
847 int len;
848
849 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
850
851 dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n",
852 adapter->current_op);
853 return;
854 }
855 len = sizeof(struct virtchnl_rss_key) +
856 (adapter->rss_key_size * sizeof(u8)) - 1;
857 vrk = kzalloc(len, GFP_KERNEL);
858 if (!vrk)
859 return;
860 vrk->vsi_id = adapter->vsi.id;
861 vrk->key_len = adapter->rss_key_size;
862 memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size);
863
864 adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_KEY;
865 adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_KEY;
866 iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY, (u8 *)vrk, len);
867 kfree(vrk);
868}
869
870
871
872
873
874
875
876void iavf_set_rss_lut(struct iavf_adapter *adapter)
877{
878 struct virtchnl_rss_lut *vrl;
879 int len;
880
881 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
882
883 dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n",
884 adapter->current_op);
885 return;
886 }
887 len = sizeof(struct virtchnl_rss_lut) +
888 (adapter->rss_lut_size * sizeof(u8)) - 1;
889 vrl = kzalloc(len, GFP_KERNEL);
890 if (!vrl)
891 return;
892 vrl->vsi_id = adapter->vsi.id;
893 vrl->lut_entries = adapter->rss_lut_size;
894 memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size);
895 adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_LUT;
896 adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_LUT;
897 iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT, (u8 *)vrl, len);
898 kfree(vrl);
899}
900
901
902
903
904
905
906
907void iavf_enable_vlan_stripping(struct iavf_adapter *adapter)
908{
909 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
910
911 dev_err(&adapter->pdev->dev, "Cannot enable stripping, command %d pending\n",
912 adapter->current_op);
913 return;
914 }
915 adapter->current_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
916 adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
917 iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, NULL, 0);
918}
919
920
921
922
923
924
925
926void iavf_disable_vlan_stripping(struct iavf_adapter *adapter)
927{
928 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
929
930 dev_err(&adapter->pdev->dev, "Cannot disable stripping, command %d pending\n",
931 adapter->current_op);
932 return;
933 }
934 adapter->current_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
935 adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
936 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, NULL, 0);
937}
938
939#define IAVF_MAX_SPEED_STRLEN 13
940
941
942
943
944
945
946
947static void iavf_print_link_message(struct iavf_adapter *adapter)
948{
949 struct net_device *netdev = adapter->netdev;
950 int link_speed_mbps;
951 char *speed;
952
953 if (!adapter->link_up) {
954 netdev_info(netdev, "NIC Link is Down\n");
955 return;
956 }
957
958 speed = kzalloc(IAVF_MAX_SPEED_STRLEN, GFP_KERNEL);
959 if (!speed)
960 return;
961
962 if (ADV_LINK_SUPPORT(adapter)) {
963 link_speed_mbps = adapter->link_speed_mbps;
964 goto print_link_msg;
965 }
966
967 switch (adapter->link_speed) {
968 case VIRTCHNL_LINK_SPEED_40GB:
969 link_speed_mbps = SPEED_40000;
970 break;
971 case VIRTCHNL_LINK_SPEED_25GB:
972 link_speed_mbps = SPEED_25000;
973 break;
974 case VIRTCHNL_LINK_SPEED_20GB:
975 link_speed_mbps = SPEED_20000;
976 break;
977 case VIRTCHNL_LINK_SPEED_10GB:
978 link_speed_mbps = SPEED_10000;
979 break;
980 case VIRTCHNL_LINK_SPEED_5GB:
981 link_speed_mbps = SPEED_5000;
982 break;
983 case VIRTCHNL_LINK_SPEED_2_5GB:
984 link_speed_mbps = SPEED_2500;
985 break;
986 case VIRTCHNL_LINK_SPEED_1GB:
987 link_speed_mbps = SPEED_1000;
988 break;
989 case VIRTCHNL_LINK_SPEED_100MB:
990 link_speed_mbps = SPEED_100;
991 break;
992 default:
993 link_speed_mbps = SPEED_UNKNOWN;
994 break;
995 }
996
997print_link_msg:
998 if (link_speed_mbps > SPEED_1000) {
999 if (link_speed_mbps == SPEED_2500)
1000 snprintf(speed, IAVF_MAX_SPEED_STRLEN, "2.5 Gbps");
1001 else
1002
1003 snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s",
1004 link_speed_mbps / 1000, "Gbps");
1005 } else if (link_speed_mbps == SPEED_UNKNOWN) {
1006 snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%s", "Unknown Mbps");
1007 } else {
1008 snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%u %s",
1009 link_speed_mbps, "Mbps");
1010 }
1011
1012 netdev_info(netdev, "NIC Link is Up Speed is %s Full Duplex\n", speed);
1013 kfree(speed);
1014}
1015
1016
1017
1018
1019
1020
1021
1022
1023static bool
1024iavf_get_vpe_link_status(struct iavf_adapter *adapter,
1025 struct virtchnl_pf_event *vpe)
1026{
1027 if (ADV_LINK_SUPPORT(adapter))
1028 return vpe->event_data.link_event_adv.link_status;
1029 else
1030 return vpe->event_data.link_event.link_status;
1031}
1032
1033
1034
1035
1036
1037
1038
1039
1040static void
1041iavf_set_adapter_link_speed_from_vpe(struct iavf_adapter *adapter,
1042 struct virtchnl_pf_event *vpe)
1043{
1044 if (ADV_LINK_SUPPORT(adapter))
1045 adapter->link_speed_mbps =
1046 vpe->event_data.link_event_adv.link_speed;
1047 else
1048 adapter->link_speed = vpe->event_data.link_event.link_speed;
1049}
1050
1051
1052
1053
1054
1055
1056
1057
1058void iavf_enable_channels(struct iavf_adapter *adapter)
1059{
1060 struct virtchnl_tc_info *vti = NULL;
1061 size_t len;
1062 int i;
1063
1064 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1065
1066 dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
1067 adapter->current_op);
1068 return;
1069 }
1070
1071 len = struct_size(vti, list, adapter->num_tc - 1);
1072 vti = kzalloc(len, GFP_KERNEL);
1073 if (!vti)
1074 return;
1075 vti->num_tc = adapter->num_tc;
1076 for (i = 0; i < vti->num_tc; i++) {
1077 vti->list[i].count = adapter->ch_config.ch_info[i].count;
1078 vti->list[i].offset = adapter->ch_config.ch_info[i].offset;
1079 vti->list[i].pad = 0;
1080 vti->list[i].max_tx_rate =
1081 adapter->ch_config.ch_info[i].max_tx_rate;
1082 }
1083
1084 adapter->ch_config.state = __IAVF_TC_RUNNING;
1085 adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
1086 adapter->current_op = VIRTCHNL_OP_ENABLE_CHANNELS;
1087 adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_CHANNELS;
1088 iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_CHANNELS, (u8 *)vti, len);
1089 kfree(vti);
1090}
1091
1092
1093
1094
1095
1096
1097
1098void iavf_disable_channels(struct iavf_adapter *adapter)
1099{
1100 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1101
1102 dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
1103 adapter->current_op);
1104 return;
1105 }
1106
1107 adapter->ch_config.state = __IAVF_TC_INVALID;
1108 adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
1109 adapter->current_op = VIRTCHNL_OP_DISABLE_CHANNELS;
1110 adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_CHANNELS;
1111 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_CHANNELS, NULL, 0);
1112}
1113
1114
1115
1116
1117
1118
1119
1120
/**
 * iavf_print_cloud_filter - dump a cloud filter's match fields to the log
 * @adapter: adapter structure
 * @f: virtchnl filter to print
 *
 * Only TCP-over-IPv4 and TCP-over-IPv6 flow types are printed; other
 * flow types are silently ignored.
 */
static void iavf_print_cloud_filter(struct iavf_adapter *adapter,
				    struct virtchnl_filter *f)
{
	switch (f->flow_type) {
	case VIRTCHNL_TCP_V4_FLOW:
		dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI4 src_ip %pI4 dst_port %hu src_port %hu\n",
			 &f->data.tcp_spec.dst_mac,
			 &f->data.tcp_spec.src_mac,
			 ntohs(f->data.tcp_spec.vlan_id),
			 &f->data.tcp_spec.dst_ip[0],
			 &f->data.tcp_spec.src_ip[0],
			 ntohs(f->data.tcp_spec.dst_port),
			 ntohs(f->data.tcp_spec.src_port));
		break;
	case VIRTCHNL_TCP_V6_FLOW:
		dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI6 src_ip %pI6 dst_port %hu src_port %hu\n",
			 &f->data.tcp_spec.dst_mac,
			 &f->data.tcp_spec.src_mac,
			 ntohs(f->data.tcp_spec.vlan_id),
			 &f->data.tcp_spec.dst_ip,
			 &f->data.tcp_spec.src_ip,
			 ntohs(f->data.tcp_spec.dst_port),
			 ntohs(f->data.tcp_spec.src_port));
		break;
	}
}
1147
1148
1149
1150
1151
1152
1153
1154
/**
 * iavf_add_cloud_filter - send pending cloud filter additions to the PF
 * @adapter: adapter structure
 *
 * Sends one VIRTCHNL_OP_ADD_CLOUD_FILTER message per filter marked for
 * addition; each such filter is moved to the ADD_PENDING state for the
 * reply handler to finalize.
 */
void iavf_add_cloud_filter(struct iavf_adapter *adapter)
{
	struct iavf_cloud_filter *cf;
	struct virtchnl_filter *f;
	int len = 0, count = 0;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add cloud filter, command %d pending\n",
			adapter->current_op);
		return;
	}
	/* Quick scan: is anything actually waiting to be added? */
	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
		if (cf->add) {
			count++;
			break;
		}
	}
	if (!count) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ADD_CLOUD_FILTER;

	len = sizeof(struct virtchnl_filter);
	f = kzalloc(len, GFP_KERNEL);
	if (!f)
		return;

	/* NOTE(review): this list walk is not under a lock here —
	 * presumably callers serialize cloud filter list changes; verify.
	 */
	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
		if (cf->add) {
			memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
			cf->add = false;
			cf->state = __IAVF_CF_ADD_PENDING;
			iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_CLOUD_FILTER,
					 (u8 *)f, len);
		}
	}
	kfree(f);
}
1195
1196
1197
1198
1199
1200
1201
1202
/**
 * iavf_del_cloud_filter - send pending cloud filter removals to the PF
 * @adapter: adapter structure
 *
 * Sends one VIRTCHNL_OP_DEL_CLOUD_FILTER message per filter marked for
 * deletion; each such filter is moved to the DEL_PENDING state for the
 * reply handler to remove from the list.
 */
void iavf_del_cloud_filter(struct iavf_adapter *adapter)
{
	struct iavf_cloud_filter *cf, *cftmp;
	struct virtchnl_filter *f;
	int len = 0, count = 0;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot remove cloud filter, command %d pending\n",
			adapter->current_op);
		return;
	}
	/* Quick scan: is anything actually waiting to be deleted? */
	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
		if (cf->del) {
			count++;
			break;
		}
	}
	if (!count) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
		return;
	}
	adapter->current_op = VIRTCHNL_OP_DEL_CLOUD_FILTER;

	len = sizeof(struct virtchnl_filter);
	f = kzalloc(len, GFP_KERNEL);
	if (!f)
		return;

	/* NOTE(review): this list walk is not under a lock here —
	 * presumably callers serialize cloud filter list changes; verify.
	 */
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
		if (cf->del) {
			memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
			cf->del = false;
			cf->state = __IAVF_CF_DEL_PENDING;
			iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_CLOUD_FILTER,
					 (u8 *)f, len);
		}
	}
	kfree(f);
}
1243
1244
1245
1246
1247
1248
1249
1250
1251void iavf_add_fdir_filter(struct iavf_adapter *adapter)
1252{
1253 struct iavf_fdir_fltr *fdir;
1254 struct virtchnl_fdir_add *f;
1255 bool process_fltr = false;
1256 int len;
1257
1258 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1259
1260 dev_err(&adapter->pdev->dev, "Cannot add Flow Director filter, command %d pending\n",
1261 adapter->current_op);
1262 return;
1263 }
1264
1265 len = sizeof(struct virtchnl_fdir_add);
1266 f = kzalloc(len, GFP_KERNEL);
1267 if (!f)
1268 return;
1269
1270 spin_lock_bh(&adapter->fdir_fltr_lock);
1271 list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
1272 if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
1273 process_fltr = true;
1274 fdir->state = IAVF_FDIR_FLTR_ADD_PENDING;
1275 memcpy(f, &fdir->vc_add_msg, len);
1276 break;
1277 }
1278 }
1279 spin_unlock_bh(&adapter->fdir_fltr_lock);
1280
1281 if (!process_fltr) {
1282
1283
1284
1285 adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_FDIR_FILTER;
1286 kfree(f);
1287 return;
1288 }
1289 adapter->current_op = VIRTCHNL_OP_ADD_FDIR_FILTER;
1290 iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_FDIR_FILTER, (u8 *)f, len);
1291 kfree(f);
1292}
1293
1294
1295
1296
1297
1298
1299
1300
1301void iavf_del_fdir_filter(struct iavf_adapter *adapter)
1302{
1303 struct iavf_fdir_fltr *fdir;
1304 struct virtchnl_fdir_del f;
1305 bool process_fltr = false;
1306 int len;
1307
1308 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1309
1310 dev_err(&adapter->pdev->dev, "Cannot remove Flow Director filter, command %d pending\n",
1311 adapter->current_op);
1312 return;
1313 }
1314
1315 len = sizeof(struct virtchnl_fdir_del);
1316
1317 spin_lock_bh(&adapter->fdir_fltr_lock);
1318 list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
1319 if (fdir->state == IAVF_FDIR_FLTR_DEL_REQUEST) {
1320 process_fltr = true;
1321 memset(&f, 0, len);
1322 f.vsi_id = fdir->vc_add_msg.vsi_id;
1323 f.flow_id = fdir->flow_id;
1324 fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
1325 break;
1326 }
1327 }
1328 spin_unlock_bh(&adapter->fdir_fltr_lock);
1329
1330 if (!process_fltr) {
1331 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_FDIR_FILTER;
1332 return;
1333 }
1334
1335 adapter->current_op = VIRTCHNL_OP_DEL_FDIR_FILTER;
1336 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_FDIR_FILTER, (u8 *)&f, len);
1337}
1338
1339
1340
1341
1342
1343
1344
1345
1346void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter)
1347{
1348 struct virtchnl_rss_cfg *rss_cfg;
1349 struct iavf_adv_rss *rss;
1350 bool process_rss = false;
1351 int len;
1352
1353 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1354
1355 dev_err(&adapter->pdev->dev, "Cannot add RSS configuration, command %d pending\n",
1356 adapter->current_op);
1357 return;
1358 }
1359
1360 len = sizeof(struct virtchnl_rss_cfg);
1361 rss_cfg = kzalloc(len, GFP_KERNEL);
1362 if (!rss_cfg)
1363 return;
1364
1365 spin_lock_bh(&adapter->adv_rss_lock);
1366 list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
1367 if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) {
1368 process_rss = true;
1369 rss->state = IAVF_ADV_RSS_ADD_PENDING;
1370 memcpy(rss_cfg, &rss->cfg_msg, len);
1371 iavf_print_adv_rss_cfg(adapter, rss,
1372 "Input set change for",
1373 "is pending");
1374 break;
1375 }
1376 }
1377 spin_unlock_bh(&adapter->adv_rss_lock);
1378
1379 if (process_rss) {
1380 adapter->current_op = VIRTCHNL_OP_ADD_RSS_CFG;
1381 iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_RSS_CFG,
1382 (u8 *)rss_cfg, len);
1383 } else {
1384 adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_ADV_RSS_CFG;
1385 }
1386
1387 kfree(rss_cfg);
1388}
1389
1390
1391
1392
1393
1394
1395
1396
1397void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter)
1398{
1399 struct virtchnl_rss_cfg *rss_cfg;
1400 struct iavf_adv_rss *rss;
1401 bool process_rss = false;
1402 int len;
1403
1404 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1405
1406 dev_err(&adapter->pdev->dev, "Cannot remove RSS configuration, command %d pending\n",
1407 adapter->current_op);
1408 return;
1409 }
1410
1411 len = sizeof(struct virtchnl_rss_cfg);
1412 rss_cfg = kzalloc(len, GFP_KERNEL);
1413 if (!rss_cfg)
1414 return;
1415
1416 spin_lock_bh(&adapter->adv_rss_lock);
1417 list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
1418 if (rss->state == IAVF_ADV_RSS_DEL_REQUEST) {
1419 process_rss = true;
1420 rss->state = IAVF_ADV_RSS_DEL_PENDING;
1421 memcpy(rss_cfg, &rss->cfg_msg, len);
1422 break;
1423 }
1424 }
1425 spin_unlock_bh(&adapter->adv_rss_lock);
1426
1427 if (process_rss) {
1428 adapter->current_op = VIRTCHNL_OP_DEL_RSS_CFG;
1429 iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_RSS_CFG,
1430 (u8 *)rss_cfg, len);
1431 } else {
1432 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
1433 }
1434
1435 kfree(rss_cfg);
1436}
1437
1438
1439
1440
1441
1442
1443
/**
 * iavf_request_reset - request that the PF reset this VF
 * @adapter: adapter structure
 *
 * Fire-and-forget: the reply (if any) is not tracked, so current_op is
 * cleared immediately rather than left pending on this opcode.
 **/
void iavf_request_reset(struct iavf_adapter *adapter)
{
	/* Don't check responses from the PF for this op */
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
}
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
/**
 * iavf_virtchnl_completion - handle a message from the PF
 * @adapter: adapter structure
 * @v_opcode: opcode the message refers to
 * @v_retval: status the PF attached to the message
 * @msg: message payload
 * @msglen: length of @msg in bytes
 *
 * Processes one message received over the admin queue. Unsolicited
 * VIRTCHNL_OP_EVENT messages (link change, impending reset) are handled
 * first and return early; everything else is treated as the reply to a
 * command this VF previously sent: error replies are reported (and any
 * PENDING filter/RSS state rolled back or discarded), then successful
 * replies are applied, and finally current_op is cleared so the next
 * command can be issued.
 */
void iavf_virtchnl_completion(struct iavf_adapter *adapter,
			      enum virtchnl_ops v_opcode,
			      enum iavf_status v_retval, u8 *msg, u16 msglen)
{
	struct net_device *netdev = adapter->netdev;

	if (v_opcode == VIRTCHNL_OP_EVENT) {
		struct virtchnl_pf_event *vpe =
			(struct virtchnl_pf_event *)msg;
		bool link_up = iavf_get_vpe_link_status(adapter, vpe);

		switch (vpe->event) {
		case VIRTCHNL_EVENT_LINK_CHANGE:
			iavf_set_adapter_link_speed_from_vpe(adapter, vpe);

			/* already in the reported state; nothing to do */
			if (adapter->link_up == link_up)
				break;

			if (link_up) {
				/* Ignore link-up until the adapter is fully
				 * running; starting the netdev queues before
				 * then would let traffic flow too early.
				 */
				if (adapter->state != __IAVF_RUNNING)
					break;

				/* Likewise ignore link-up while the queues
				 * are administratively disabled (e.g. during
				 * queue reconfiguration).
				 */
				if (adapter->flags &
				    IAVF_FLAG_QUEUES_DISABLED)
					break;
			}

			/* Commit the new link state to the stack */
			adapter->link_up = link_up;
			if (link_up) {
				netif_tx_start_all_queues(netdev);
				netif_carrier_on(netdev);
			} else {
				netif_tx_stop_all_queues(netdev);
				netif_carrier_off(netdev);
			}
			iavf_print_link_message(adapter);
			break;
		case VIRTCHNL_EVENT_RESET_IMPENDING:
			dev_info(&adapter->pdev->dev, "Reset warning received from the PF\n");
			/* schedule the reset task once; don't re-queue if a
			 * reset is already pending
			 */
			if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) {
				adapter->flags |= IAVF_FLAG_RESET_PENDING;
				dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
				queue_work(iavf_wq, &adapter->reset_task);
			}
			break;
		default:
			dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n",
				vpe->event);
			break;
		}
		/* events are not replies; don't touch current_op below */
		return;
	}
	if (v_retval) {
		/* The PF rejected our request; log it and undo any state we
		 * optimistically set when the request was sent.
		 */
		switch (v_opcode) {
		case VIRTCHNL_OP_ADD_VLAN:
			dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
				iavf_stat_str(&adapter->hw, v_retval));
			break;
		case VIRTCHNL_OP_ADD_ETH_ADDR:
			dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n",
				iavf_stat_str(&adapter->hw, v_retval));
			iavf_mac_add_reject(adapter);
			/* restore administratively set MAC address */
			ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
			break;
		case VIRTCHNL_OP_DEL_VLAN:
			dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n",
				iavf_stat_str(&adapter->hw, v_retval));
			break;
		case VIRTCHNL_OP_DEL_ETH_ADDR:
			dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n",
				iavf_stat_str(&adapter->hw, v_retval));
			break;
		case VIRTCHNL_OP_ENABLE_CHANNELS:
			dev_err(&adapter->pdev->dev, "Failed to configure queue channels, error %s\n",
				iavf_stat_str(&adapter->hw, v_retval));
			/* roll back the TC config and restart the queues */
			adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
			adapter->ch_config.state = __IAVF_TC_INVALID;
			netdev_reset_tc(netdev);
			netif_tx_start_all_queues(netdev);
			break;
		case VIRTCHNL_OP_DISABLE_CHANNELS:
			dev_err(&adapter->pdev->dev, "Failed to disable queue channels, error %s\n",
				iavf_stat_str(&adapter->hw, v_retval));
			/* channels are still active; keep TC state running */
			adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
			adapter->ch_config.state = __IAVF_TC_RUNNING;
			netif_tx_start_all_queues(netdev);
			break;
		case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
			struct iavf_cloud_filter *cf, *cftmp;

			/* drop every filter whose add was pending */
			list_for_each_entry_safe(cf, cftmp,
						 &adapter->cloud_filter_list,
						 list) {
				if (cf->state == __IAVF_CF_ADD_PENDING) {
					cf->state = __IAVF_CF_INVALID;
					dev_info(&adapter->pdev->dev, "Failed to add cloud filter, error %s\n",
						 iavf_stat_str(&adapter->hw,
							       v_retval));
					iavf_print_cloud_filter(adapter,
								&cf->f);
					list_del(&cf->list);
					kfree(cf);
					adapter->num_cloud_filters--;
				}
			}
			}
			break;
		case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
			struct iavf_cloud_filter *cf;

			/* delete failed: the filter is still active on the
			 * device, so mark it active again locally
			 */
			list_for_each_entry(cf, &adapter->cloud_filter_list,
					    list) {
				if (cf->state == __IAVF_CF_DEL_PENDING) {
					cf->state = __IAVF_CF_ACTIVE;
					dev_info(&adapter->pdev->dev, "Failed to del cloud filter, error %s\n",
						 iavf_stat_str(&adapter->hw,
							       v_retval));
					iavf_print_cloud_filter(adapter,
								&cf->f);
				}
			}
			}
			break;
		case VIRTCHNL_OP_ADD_FDIR_FILTER: {
			struct iavf_fdir_fltr *fdir, *fdir_tmp;

			/* drop every fdir filter whose add was pending */
			spin_lock_bh(&adapter->fdir_fltr_lock);
			list_for_each_entry_safe(fdir, fdir_tmp,
						 &adapter->fdir_list_head,
						 list) {
				if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) {
					dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter, error %s\n",
						 iavf_stat_str(&adapter->hw,
							       v_retval));
					iavf_print_fdir_fltr(adapter, fdir);
					/* the PF may attach a textual reason */
					if (msglen)
						dev_err(&adapter->pdev->dev,
							"%s\n", msg);
					list_del(&fdir->list);
					kfree(fdir);
					adapter->fdir_active_fltr--;
				}
			}
			spin_unlock_bh(&adapter->fdir_fltr_lock);
			}
			break;
		case VIRTCHNL_OP_DEL_FDIR_FILTER: {
			struct iavf_fdir_fltr *fdir;

			/* delete failed: keep the filter and mark it active */
			spin_lock_bh(&adapter->fdir_fltr_lock);
			list_for_each_entry(fdir, &adapter->fdir_list_head,
					    list) {
				if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
					fdir->state = IAVF_FDIR_FLTR_ACTIVE;
					dev_info(&adapter->pdev->dev, "Failed to del Flow Director filter, error %s\n",
						 iavf_stat_str(&adapter->hw,
							       v_retval));
					iavf_print_fdir_fltr(adapter, fdir);
				}
			}
			spin_unlock_bh(&adapter->fdir_fltr_lock);
			}
			break;
		case VIRTCHNL_OP_ADD_RSS_CFG: {
			struct iavf_adv_rss *rss, *rss_tmp;

			/* drop every RSS config whose add was pending */
			spin_lock_bh(&adapter->adv_rss_lock);
			list_for_each_entry_safe(rss, rss_tmp,
						 &adapter->adv_rss_list_head,
						 list) {
				if (rss->state == IAVF_ADV_RSS_ADD_PENDING) {
					iavf_print_adv_rss_cfg(adapter, rss,
							       "Failed to change the input set for",
							       NULL);
					list_del(&rss->list);
					kfree(rss);
				}
			}
			spin_unlock_bh(&adapter->adv_rss_lock);
			}
			break;
		case VIRTCHNL_OP_DEL_RSS_CFG: {
			struct iavf_adv_rss *rss;

			/* delete failed: keep the config and mark it active */
			spin_lock_bh(&adapter->adv_rss_lock);
			list_for_each_entry(rss, &adapter->adv_rss_list_head,
					    list) {
				if (rss->state == IAVF_ADV_RSS_DEL_PENDING) {
					rss->state = IAVF_ADV_RSS_ACTIVE;
					dev_err(&adapter->pdev->dev, "Failed to delete RSS configuration, error %s\n",
						iavf_stat_str(&adapter->hw,
							      v_retval));
				}
			}
			spin_unlock_bh(&adapter->adv_rss_lock);
			}
			break;
		case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
		case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
			dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n");
			break;
		default:
			dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
				v_retval, iavf_stat_str(&adapter->hw, v_retval),
				v_opcode);
		}
	}
	/* Post-processing for replies; note several cases below also run on
	 * error replies and must check v_retval themselves where it matters.
	 */
	switch (v_opcode) {
	case VIRTCHNL_OP_ADD_ETH_ADDR:
		if (!v_retval)
			iavf_mac_add_ok(adapter);
		/* keep the netdev address in sync with the HW address */
		if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr))
			ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
		break;
	case VIRTCHNL_OP_GET_STATS: {
		struct iavf_eth_stats *stats =
			(struct iavf_eth_stats *)msg;
		/* fold the PF-reported counters into netdev stats */
		netdev->stats.rx_packets = stats->rx_unicast +
					   stats->rx_multicast +
					   stats->rx_broadcast;
		netdev->stats.tx_packets = stats->tx_unicast +
					   stats->tx_multicast +
					   stats->tx_broadcast;
		netdev->stats.rx_bytes = stats->rx_bytes;
		netdev->stats.tx_bytes = stats->tx_bytes;
		netdev->stats.tx_errors = stats->tx_errors;
		netdev->stats.rx_dropped = stats->rx_discards;
		netdev->stats.tx_dropped = stats->tx_discards;
		adapter->current_stats = *stats;
		}
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES: {
		/* copy no more than the resource struct can hold */
		u16 len = sizeof(struct virtchnl_vf_resource) +
			  IAVF_MAX_VF_VSI *
			  sizeof(struct virtchnl_vsi_resource);
		memcpy(adapter->vf_res, msg, min(msglen, len));
		iavf_validate_num_queues(adapter);
		iavf_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
		if (is_zero_ether_addr(adapter->hw.mac.addr)) {
			/* restore current MAC address */
			ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
		} else {
			/* refresh current MAC address if it changed */
			ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
			ether_addr_copy(netdev->perm_addr,
					adapter->hw.mac.addr);
		}
		spin_lock_bh(&adapter->mac_vlan_list_lock);
		iavf_add_filter(adapter, adapter->hw.mac.addr);
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		iavf_process_config(adapter);
		}
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
		/* enable transmits */
		iavf_irq_enable(adapter, true);
		adapter->flags &= ~IAVF_FLAG_QUEUES_DISABLED;
		break;
	case VIRTCHNL_OP_DISABLE_QUEUES:
		iavf_free_all_tx_resources(adapter);
		iavf_free_all_rx_resources(adapter);
		if (adapter->state == __IAVF_DOWN_PENDING) {
			adapter->state = __IAVF_DOWN;
			/* anyone waiting for the interface to go down can
			 * proceed now
			 */
			wake_up(&adapter->down_waitqueue);
		}
		break;
	case VIRTCHNL_OP_VERSION:
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		/* These can arrive out of sequence; if this reply does not
		 * match the op we are waiting on, leave current_op untouched
		 * by returning instead of breaking.
		 */
		if (v_opcode != adapter->current_op)
			return;
		break;
	case VIRTCHNL_OP_IWARP:
		/* Forward non-empty iWARP messages to the client, if one is
		 * registered; zero-length messages carry nothing to deliver.
		 */
		if (msglen && CLIENT_ENABLED(adapter))
			iavf_notify_client_message(&adapter->vsi, msg, msglen);
		break;

	case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
		adapter->client_pending &=
				~(BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP));
		break;
	case VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
		struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;

		/* only accept a payload of exactly the expected size */
		if (msglen == sizeof(*vrh))
			adapter->hena = vrh->hena;
		else
			dev_warn(&adapter->pdev->dev,
				 "Invalid message %d from PF\n", v_opcode);
		}
		break;
	case VIRTCHNL_OP_REQUEST_QUEUES: {
		struct virtchnl_vf_res_request *vfres =
			(struct virtchnl_vf_res_request *)msg;

		/* PF granted a different queue count than requested; give up
		 * on the request rather than reinitializing
		 */
		if (vfres->num_queue_pairs != adapter->num_req_queues) {
			dev_info(&adapter->pdev->dev,
				 "Requested %d queues, PF can support %d\n",
				 adapter->num_req_queues,
				 vfres->num_queue_pairs);
			adapter->num_req_queues = 0;
			adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
		}
		}
		break;
	case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
		struct iavf_cloud_filter *cf;

		/* adds confirmed; promote pending filters to active */
		list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
			if (cf->state == __IAVF_CF_ADD_PENDING)
				cf->state = __IAVF_CF_ACTIVE;
		}
		}
		break;
	case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
		struct iavf_cloud_filter *cf, *cftmp;

		/* deletes confirmed; remove pending filters from the list */
		list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
					 list) {
			if (cf->state == __IAVF_CF_DEL_PENDING) {
				cf->state = __IAVF_CF_INVALID;
				list_del(&cf->list);
				kfree(cf);
				adapter->num_cloud_filters--;
			}
		}
		}
		break;
	case VIRTCHNL_OP_ADD_FDIR_FILTER: {
		struct virtchnl_fdir_add *add_fltr = (struct virtchnl_fdir_add *)msg;
		struct iavf_fdir_fltr *fdir, *fdir_tmp;

		spin_lock_bh(&adapter->fdir_fltr_lock);
		list_for_each_entry_safe(fdir, fdir_tmp,
					 &adapter->fdir_list_head,
					 list) {
			if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) {
				if (add_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
					dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n",
						 fdir->loc);
					fdir->state = IAVF_FDIR_FLTR_ACTIVE;
					/* remember the PF's flow id so the
					 * filter can be deleted later
					 */
					fdir->flow_id = add_fltr->flow_id;
				} else {
					dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter with status: %d\n",
						 add_fltr->status);
					iavf_print_fdir_fltr(adapter, fdir);
					list_del(&fdir->list);
					kfree(fdir);
					adapter->fdir_active_fltr--;
				}
			}
		}
		spin_unlock_bh(&adapter->fdir_fltr_lock);
		}
		break;
	case VIRTCHNL_OP_DEL_FDIR_FILTER: {
		struct virtchnl_fdir_del *del_fltr = (struct virtchnl_fdir_del *)msg;
		struct iavf_fdir_fltr *fdir, *fdir_tmp;

		spin_lock_bh(&adapter->fdir_fltr_lock);
		list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head,
					 list) {
			if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
				if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
					dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n",
						 fdir->loc);
					list_del(&fdir->list);
					kfree(fdir);
					adapter->fdir_active_fltr--;
				} else {
					/* PF refused the delete; the filter
					 * remains programmed, keep it active
					 */
					fdir->state = IAVF_FDIR_FLTR_ACTIVE;
					dev_info(&adapter->pdev->dev, "Failed to delete Flow Director filter with status: %d\n",
						 del_fltr->status);
					iavf_print_fdir_fltr(adapter, fdir);
				}
			}
		}
		spin_unlock_bh(&adapter->fdir_fltr_lock);
		}
		break;
	case VIRTCHNL_OP_ADD_RSS_CFG: {
		struct iavf_adv_rss *rss;

		/* adds confirmed; promote pending configs to active */
		spin_lock_bh(&adapter->adv_rss_lock);
		list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
			if (rss->state == IAVF_ADV_RSS_ADD_PENDING) {
				iavf_print_adv_rss_cfg(adapter, rss,
						       "Input set change for",
						       "successful");
				rss->state = IAVF_ADV_RSS_ACTIVE;
			}
		}
		spin_unlock_bh(&adapter->adv_rss_lock);
		}
		break;
	case VIRTCHNL_OP_DEL_RSS_CFG: {
		struct iavf_adv_rss *rss, *rss_tmp;

		/* deletes confirmed; remove pending configs from the list */
		spin_lock_bh(&adapter->adv_rss_lock);
		list_for_each_entry_safe(rss, rss_tmp,
					 &adapter->adv_rss_list_head, list) {
			if (rss->state == IAVF_ADV_RSS_DEL_PENDING) {
				list_del(&rss->list);
				kfree(rss);
			}
		}
		spin_unlock_bh(&adapter->adv_rss_lock);
		}
		break;
	default:
		if (adapter->current_op && (v_opcode != adapter->current_op))
			dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
				 adapter->current_op, v_opcode);
		break;
	} /* switch v_opcode */
	/* command completed (or was unsolicited); allow the next one */
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
}
1900