#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <net/vxlan.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn23xx_vf_device.h"

MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Virtual Function Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(LIQUIDIO_VERSION);

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
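
/* The value is a NETIF_MSG_* bitmask (consumed via netif_msg_init() in
 * setup_nic_devices()); -1 keeps the DEFAULT_MSG_ENABLE set below. For
 * example, loading the module with "debug=0x7" enables the DRV, PROBE and
 * LINK message classes.
 */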

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

struct liquidio_if_cfg_context {
	/* Id of the octeon device on which the request was sent. */
	int octeon_id;

	/* Wait queue on which the caller sleeps for the response. */
	wait_queue_head_t wc;

	/* Condition flag set by the completion callback. */
	int cond;
};

struct liquidio_if_cfg_resp {
	u64 rh;
	struct liquidio_if_cfg_info cfg_info;
	u64 status;
};

struct liquidio_rx_ctl_context {
	/* Id of the octeon device on which the request was sent. */
	int octeon_id;

	/* Wait queue on which the caller sleeps for the response. */
	wait_queue_head_t wc;

	/* Condition flag set by the completion callback. */
	int cond;
};

struct oct_timestamp_resp {
	u64 rh;
	u64 timestamp;
	u64 status;
};

union tx_info {
	u64 u64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		u16 gso_size;
		u16 gso_segs;
		u32 reserved;
#else
		u32 reserved;
		u16 gso_segs;
		u16 gso_size;
#endif
	} s;
};

#define OCTNIC_MAX_SG  (MAX_SKB_FRAGS)

#define OCTNIC_GSO_MAX_HEADER_SIZE 128
#define OCTNIC_GSO_MAX_SIZE \
		(CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
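
/* Largest GSO super-packet the stack may hand us: the CN23XX default input
 * jabber size less room for the largest L2-L4 header, so no resulting
 * segment can exceed what the device input port accepts.
 */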

struct octnic_gather {
	/* List manipulation: next and prev pointers. */
	struct list_head list;

	/* Size of the gather component at sg, in bytes. */
	int sg_size;

	/* Number of bytes that sg was adjusted by to make it 8B-aligned. */
	int adjust;

	/* Gather component that can accommodate a max-sized fragment list
	 * received from the IP layer.
	 */
	struct octeon_sg_entry *sg;

	dma_addr_t sg_dma_ptr;
};
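
/* Gather buffers are carved out of per-IQ DMA-coherent pools in
 * setup_glists(), claimed in liquidio_xmit() for fragmented skbs, and
 * returned to the per-IQ free list from the free_netsgbuf*() completions.
 */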

static int
liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void liquidio_vf_remove(struct pci_dev *pdev);
static int octeon_device_init(struct octeon_device *oct);
static int liquidio_stop(struct net_device *netdev);

/* Poll the DROQs until no packets remain or the retry budget expires. */
static int lio_wait_for_oq_pkts(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	int retry = MAX_IO_PENDING_PKT_COUNT;
	int pkt_cnt = 0, pending_pkts;
	int i;

	do {
		pending_pkts = 0;

		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
		}
		if (pkt_cnt > 0) {
			pending_pkts += pkt_cnt;
			tasklet_schedule(&oct_priv->droq_tasklet);
		}
		pkt_cnt = 0;
		schedule_timeout_uninterruptible(1);

	} while (retry-- && pending_pkts);

	return pkt_cnt;
}

/**
 * \brief Cause the device to go quiet so it can be safely removed/reset/etc
 * @param oct Pointer to Octeon device
 */
static void pcierror_quiesce_device(struct octeon_device *oct)
{
	int i;

	/* Disable the input and output queues now. No more packets will
	 * arrive from Octeon, but we should wait for all packet processing
	 * to finish.
	 */

	/* To allow for in-flight requests */
	schedule_timeout_uninterruptible(100);

	if (wait_for_pending_requests(oct))
		dev_err(&oct->pci_dev->dev, "There were pending requests\n");

	/* Force all requests waiting to be fetched by OCTEON to complete. */
	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		struct octeon_instr_queue *iq;

		if (!(oct->io_qmask.iq & BIT_ULL(i)))
			continue;
		iq = oct->instr_queue[i];

		if (atomic_read(&iq->instr_pending)) {
			spin_lock_bh(&iq->lock);
			iq->fill_cnt = 0;
			iq->octeon_read_index = iq->host_write_index;
			iq->stats.instr_processed +=
				atomic_read(&iq->instr_pending);
			lio_process_iq_request_list(oct, iq, 0);
			spin_unlock_bh(&iq->lock);
		}
	}

	/* Force all pending ordered list requests to time out. */
	lio_process_ordered_list(oct, 1);

	/* We do not need to wait for output queue packets to be processed. */
}

/**
 * \brief Cleanup uncorrectable error status for AER
 * @param dev Pointer to PCI device
 */
static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
	u32 status, mask;
	int pos = 0x100;

	pr_info("%s :\n", __func__);

	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
	if (dev->error_state == pci_channel_io_normal)
		status &= ~mask; /* Clear corresponding nonfatal bits */
	else
		status &= mask; /* Clear corresponding fatal bits */
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}

/**
 * \brief Stop all PCI IO to a given device
 * @param oct Pointer to Octeon device
 */
static void stop_pci_io(struct octeon_device *oct)
{
	struct msix_entry *msix_entries;
	int i;

	/* No more instructions will be forwarded. */
	atomic_set(&oct->status, OCT_DEV_IN_RESET);

	for (i = 0; i < oct->ifcount; i++)
		netif_device_detach(oct->props[i].netdev);

	/* Disable interrupts */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	pcierror_quiesce_device(oct);
	if (oct->msix_on) {
		msix_entries = (struct msix_entry *)oct->msix_entries;
		for (i = 0; i < oct->num_msix_irqs; i++) {
			/* clear the affinity_cpumask */
			irq_set_affinity_hint(msix_entries[i].vector,
					      NULL);
			free_irq(msix_entries[i].vector,
				 &oct->ioq_vector[i]);
		}
		pci_disable_msix(oct->pci_dev);
		kfree(oct->msix_entries);
		oct->msix_entries = NULL;
		octeon_free_ioq_vector(oct);
	}
	dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
		lio_get_state_string(&oct->status));

	/* making it a common function for all OCTEON models */
	cleanup_aer_uncorrect_error_status(oct->pci_dev);

	pci_disable_device(oct->pci_dev);
}

/**
 * \brief called when PCI error is detected
 * @param pdev Pointer to PCI device
 * @param state The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
						     pci_channel_state_t state)
{
	struct octeon_device *oct = pci_get_drvdata(pdev);

	/* Non-correctable Non-fatal errors */
	if (state == pci_channel_io_normal) {
		dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
		cleanup_aer_uncorrect_error_status(oct->pci_dev);
		return PCI_ERS_RESULT_CAN_RECOVER;
	}

	/* Non-correctable Fatal errors */
	dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
	stop_pci_io(oct);

	return PCI_ERS_RESULT_DISCONNECT;
}

/* For PCI-Express Advanced Error Recovery (AER) Interface */
static const struct pci_error_handlers liquidio_vf_err_handler = {
	.error_detected = liquidio_pcie_error_detected,
};

static const struct pci_device_id liquidio_vf_pci_tbl[] = {
	{
		PCI_VENDOR_ID_CAVIUM, OCTEON_CN23XX_VF_VID,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{
		0, 0, 0, 0, 0, 0, 0
	}
};
MODULE_DEVICE_TABLE(pci, liquidio_vf_pci_tbl);

static struct pci_driver liquidio_vf_pci_driver = {
	.name		= "LiquidIO_VF",
	.id_table	= liquidio_vf_pci_tbl,
	.probe		= liquidio_vf_probe,
	.remove		= liquidio_vf_remove,
	.err_handler	= &liquidio_vf_err_handler,
};

/**
 * \brief Stop Tx queues
 * @param netdev network device
 */
static void txqs_stop(struct net_device *netdev)
{
	if (netif_is_multiqueue(netdev)) {
		int i;

		for (i = 0; i < netdev->num_tx_queues; i++)
			netif_stop_subqueue(netdev, i);
	} else {
		netif_stop_queue(netdev);
	}
}

/**
 * \brief Start Tx queues
 * @param netdev network device
 */
static void txqs_start(struct net_device *netdev)
{
	if (netif_is_multiqueue(netdev)) {
		int i;

		for (i = 0; i < netdev->num_tx_queues; i++)
			netif_start_subqueue(netdev, i);
	} else {
		netif_start_queue(netdev);
	}
}

/**
 * \brief Wake Tx queues
 * @param netdev network device
 */
static void txqs_wake(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (netif_is_multiqueue(netdev)) {
		int i;

		for (i = 0; i < netdev->num_tx_queues; i++) {
			int qno = lio->linfo.txpciq[i % lio->oct_dev->num_iqs]
				  .s.q_no;
			if (__netif_subqueue_stopped(netdev, i)) {
				INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
							  tx_restart, 1);
				netif_wake_subqueue(netdev, i);
			}
		}
	} else {
		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
					  tx_restart, 1);
		netif_wake_queue(netdev);
	}
}

/**
 * \brief Start Tx queue
 * @param netdev network device
 */
static void start_txq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->linfo.link.s.link_up) {
		txqs_start(netdev);
		return;
	}
}

/**
 * \brief Wake a queue
 * @param netdev network device
 * @param q which queue to wake
 */
static void wake_q(struct net_device *netdev, int q)
{
	if (netif_is_multiqueue(netdev))
		netif_wake_subqueue(netdev, q);
	else
		netif_wake_queue(netdev);
}

/**
 * \brief Stop a queue
 * @param netdev network device
 * @param q which queue to stop
 */
static void stop_q(struct net_device *netdev, int q)
{
	if (netif_is_multiqueue(netdev))
		netif_stop_subqueue(netdev, q);
	else
		netif_stop_queue(netdev);
}

/**
 * Remove the node at the head of the list. The list would be empty at
 * the end of this call if there are no more nodes in the list.
 */
static struct list_head *list_delete_head(struct list_head *root)
{
	struct list_head *node;

	if ((root->prev == root) && (root->next == root))
		node = NULL;
	else
		node = root->next;

	if (node)
		list_del(node);

	return node;
}
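
/* list_delete_head() above is equivalent to a list_empty() test followed by
 * list_del() on the first node; it simply pops the head, returning NULL on
 * an empty list.
 */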

/**
 * \brief Delete gather lists
 * @param lio per-network private data
 */
static void delete_glists(struct lio *lio)
{
	struct octnic_gather *g;
	int i;

	kfree(lio->glist_lock);
	lio->glist_lock = NULL;

	if (!lio->glist)
		return;

	for (i = 0; i < lio->linfo.num_txpciq; i++) {
		do {
			g = (struct octnic_gather *)
			    list_delete_head(&lio->glist[i]);
			kfree(g);
		} while (g);

		if (lio->glists_virt_base && lio->glists_virt_base[i] &&
		    lio->glists_dma_base && lio->glists_dma_base[i]) {
			lio_dma_free(lio->oct_dev,
				     lio->glist_entry_size * lio->tx_qsize,
				     lio->glists_virt_base[i],
				     lio->glists_dma_base[i]);
		}
	}

	kfree(lio->glists_virt_base);
	lio->glists_virt_base = NULL;

	kfree(lio->glists_dma_base);
	lio->glists_dma_base = NULL;

	kfree(lio->glist);
	lio->glist = NULL;
}

/**
 * \brief Setup gather lists
 * @param lio per-network private data
 * @param num_iqs number of input queues to allocate gather lists for
 */
static int setup_glists(struct lio *lio, int num_iqs)
{
	struct octnic_gather *g;
	int i, j;

	lio->glist_lock =
	    kcalloc(num_iqs, sizeof(*lio->glist_lock), GFP_KERNEL);
	if (!lio->glist_lock)
		return -ENOMEM;

	lio->glist =
	    kcalloc(num_iqs, sizeof(*lio->glist), GFP_KERNEL);
	if (!lio->glist) {
		kfree(lio->glist_lock);
		lio->glist_lock = NULL;
		return -ENOMEM;
	}

	lio->glist_entry_size =
		ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);

	/* allocate memory to store virtual and dma base address of
	 * per glist consistent memory
	 */
	lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
					GFP_KERNEL);
	lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
				       GFP_KERNEL);

	if (!lio->glists_virt_base || !lio->glists_dma_base) {
		delete_glists(lio);
		return -ENOMEM;
	}

	for (i = 0; i < num_iqs; i++) {
		spin_lock_init(&lio->glist_lock[i]);

		INIT_LIST_HEAD(&lio->glist[i]);

		lio->glists_virt_base[i] =
			lio_dma_alloc(lio->oct_dev,
				      lio->glist_entry_size * lio->tx_qsize,
				      &lio->glists_dma_base[i]);

		if (!lio->glists_virt_base[i]) {
			delete_glists(lio);
			return -ENOMEM;
		}

		for (j = 0; j < lio->tx_qsize; j++) {
			g = kzalloc(sizeof(*g), GFP_KERNEL);
			if (!g)
				break;

			g->sg = lio->glists_virt_base[i] +
				(j * lio->glist_entry_size);

			g->sg_dma_ptr = lio->glists_dma_base[i] +
					(j * lio->glist_entry_size);

			list_add_tail(&g->list, &lio->glist[i]);
		}

		if (j != lio->tx_qsize) {
			delete_glists(lio);
			return -ENOMEM;
		}
	}

	return 0;
}
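
/* Sizing note: tx_qsize gather buffers are preallocated per IQ and each
 * in-flight command holds at most one, so list_delete_head() in
 * liquidio_xmit() should not normally come up empty while the IQ has room.
 */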

/**
 * \brief Print link information
 * @param netdev network device
 */
static void print_link_info(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
	    ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
		struct oct_link_info *linfo = &lio->linfo;

		if (linfo->link.s.link_up) {
			netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
				   linfo->link.s.speed,
				   (linfo->link.s.duplex) ? "Full" : "Half");
		} else {
			netif_info(lio, link, lio->netdev, "Link Down\n");
		}
	}
}

/**
 * \brief Routine to notify MTU change
 * @param work work_struct data structure
 */
static void octnet_link_status_change(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;

	rtnl_lock();
	call_netdevice_notifiers(NETDEV_CHANGEMTU, lio->netdev);
	rtnl_unlock();
}

/**
 * \brief Sets up the MTU status change work
 * @param netdev network device
 */
static int setup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->link_status_wq.wq = alloc_workqueue("link-status",
						 WQ_MEM_RECLAIM, 0);
	if (!lio->link_status_wq.wq) {
		dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
			  octnet_link_status_change);
	lio->link_status_wq.wk.ctxptr = lio;

	return 0;
}

static void cleanup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->link_status_wq.wq) {
		cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
		destroy_workqueue(lio->link_status_wq.wq);
	}
}

/**
 * \brief Update link status
 * @param netdev network device
 * @param ls link status structure
 *
 * Called on receipt of a link status response from the core application to
 * update each interface's link status.
 */
static void update_link_status(struct net_device *netdev,
			       union oct_link_status *ls)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) {
		lio->linfo.link.u64 = ls->u64;

		print_link_info(netdev);
		lio->link_changes++;

		if (lio->linfo.link.s.link_up) {
			netif_carrier_on(netdev);
			txqs_wake(netdev);
		} else {
			netif_carrier_off(netdev);
			txqs_stop(netdev);
		}

		if (lio->linfo.link.s.mtu != netdev->max_mtu) {
			dev_info(&oct->pci_dev->dev, "Max MTU Changed from %d to %d\n",
				 netdev->max_mtu, lio->linfo.link.s.mtu);
			netdev->max_mtu = lio->linfo.link.s.mtu;
		}

		if (lio->linfo.link.s.mtu < netdev->mtu) {
			dev_warn(&oct->pci_dev->dev,
				 "PF has changed the MTU for gmx port. Reducing the mtu from %d to %d\n",
				 netdev->mtu, lio->linfo.link.s.mtu);
			lio->mtu = lio->linfo.link.s.mtu;
			netdev->mtu = lio->linfo.link.s.mtu;
			queue_delayed_work(lio->link_status_wq.wq,
					   &lio->link_status_wq.wk.work, 0);
		}
	}
}

/**
 * \brief PCI probe handler
 * @param pdev PCI device structure
 * @param ent unused
 */
static int
liquidio_vf_probe(struct pci_dev *pdev,
		  const struct pci_device_id *ent __attribute__((unused)))
{
	struct octeon_device *oct_dev = NULL;

	oct_dev = octeon_allocate_device(pdev->device,
					 sizeof(struct octeon_device_priv));

	if (!oct_dev) {
		dev_err(&pdev->dev, "Unable to allocate device\n");
		return -ENOMEM;
	}
	oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;

	dev_info(&pdev->dev, "Initializing device %x:%x.\n",
		 (u32)pdev->vendor, (u32)pdev->device);

	/* Assign octeon_device for this device to the private data area. */
	pci_set_drvdata(pdev, oct_dev);

	/* set linux specific device pointer */
	oct_dev->pci_dev = pdev;

	if (octeon_device_init(oct_dev)) {
		liquidio_vf_remove(pdev);
		return -ENOMEM;
	}

	dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");

	return 0;
}

/**
 * \brief PCI function level reset (FLR) for this VF
 * @param oct octeon device
 */
static void octeon_pci_flr(struct octeon_device *oct)
{
	pci_save_state(oct->pci_dev);

	pci_cfg_access_lock(oct->pci_dev);

	/* Quiesce the device completely */
	pci_write_config_word(oct->pci_dev, PCI_COMMAND,
			      PCI_COMMAND_INTX_DISABLE);

	pcie_flr(oct->pci_dev);

	pci_cfg_access_unlock(oct->pci_dev);

	pci_restore_state(oct->pci_dev);
}
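
/* An FLR resets only this VF. Config space is saved and restored around the
 * reset so BAR/MSI-X state survive; writing PCI_COMMAND_INTX_DISABLE as the
 * whole command word also clears bus mastering, quiescing DMA beforehand.
 */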

/**
 * \brief Destroy resources associated with octeon device
 * @param oct octeon device
 */
static void octeon_destroy_resources(struct octeon_device *oct)
{
	struct msix_entry *msix_entries;
	int i;

	switch (atomic_read(&oct->status)) {
	case OCT_DEV_RUNNING:
	case OCT_DEV_CORE_OK:
		/* No more instructions will be forwarded. */
		atomic_set(&oct->status, OCT_DEV_IN_RESET);

		oct->app_mode = CVM_DRV_INVALID_APP;
		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
			lio_get_state_string(&oct->status));

		schedule_timeout_uninterruptible(HZ / 10);

		/* fall through */
	case OCT_DEV_HOST_OK:
		/* fall through */
	case OCT_DEV_IO_QUEUES_DONE:
		if (wait_for_pending_requests(oct))
			dev_err(&oct->pci_dev->dev, "There were pending requests\n");

		if (lio_wait_for_instr_fetch(oct))
			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

		/* Disable the input and output queues now. No more packets will
		 * arrive from Octeon, but we should wait for all packet
		 * processing to finish.
		 */
		oct->fn_list.disable_io_queues(oct);

		if (lio_wait_for_oq_pkts(oct))
			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");

		/* fall through */
	case OCT_DEV_INTR_SET_DONE:
		/* Disable interrupts */
		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

		if (oct->msix_on) {
			msix_entries = (struct msix_entry *)oct->msix_entries;
			for (i = 0; i < oct->num_msix_irqs; i++) {
				if (oct->ioq_vector[i].vector) {
					irq_set_affinity_hint(
							msix_entries[i].vector,
							NULL);
					free_irq(msix_entries[i].vector,
						 &oct->ioq_vector[i]);
					oct->ioq_vector[i].vector = 0;
				}
			}
			pci_disable_msix(oct->pci_dev);
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
			kfree(oct->irq_name_storage);
			oct->irq_name_storage = NULL;
		}

		if (oct->pci_dev->reset_fn)
			octeon_pci_flr(oct);
		else
			cn23xx_vf_ask_pf_to_do_flr(oct);

		/* fall through */
	case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
		octeon_free_ioq_vector(oct);

		/* fall through */
	case OCT_DEV_MBOX_SETUP_DONE:
		oct->fn_list.free_mbox(oct);

		/* fall through */
	case OCT_DEV_IN_RESET:
	case OCT_DEV_DROQ_INIT_DONE:
		mdelay(100);
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			octeon_delete_droq(oct, i);
		}

		/* fall through */
	case OCT_DEV_RESP_LIST_INIT_DONE:
		octeon_delete_response_list(oct);

		/* fall through */
	case OCT_DEV_INSTR_QUEUE_INIT_DONE:
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			octeon_delete_instr_queue(oct, i);
		}

		/* fall through */
	case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
		octeon_free_sc_buffer_pool(oct);

		/* fall through */
	case OCT_DEV_DISPATCH_INIT_DONE:
		octeon_delete_dispatch_list(oct);
		cancel_delayed_work_sync(&oct->nic_poll_work.work);

		/* fall through */
	case OCT_DEV_PCI_MAP_DONE:
		octeon_unmap_pci_barx(oct, 0);
		octeon_unmap_pci_barx(oct, 1);

		/* fall through */
	case OCT_DEV_PCI_ENABLE_DONE:
		pci_clear_master(oct->pci_dev);
		/* Disable the device, releasing the PCI INT */
		pci_disable_device(oct->pci_dev);

		/* fall through */
	case OCT_DEV_BEGIN_STATE:
		/* Nothing to be done here either */
		break;
	}
}

/**
 * \brief Callback for rx ctrl
 * @param oct octeon device
 * @param status status of the request
 * @param buf pointer to the soft command
 */
static void rx_ctl_callback(struct octeon_device *oct,
			    u32 status, void *buf)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
	struct liquidio_rx_ctl_context *ctx;

	ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr;

	oct = lio_get_device(ctx->octeon_id);
	if (status)
		dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. Status: %llx\n",
			CVM_CAST64(status));
	WRITE_ONCE(ctx->cond, 1);

	/* This barrier is required to be sure that the response has been
	 * written fully before waking up the handler
	 */
	wmb();

	wake_up_interruptible(&ctx->wc);
}

/**
 * \brief Send Rx control command
 * @param lio per-network private data
 * @param start_stop whether to start or stop
 */
static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
	int ctx_size = sizeof(struct liquidio_rx_ctl_context);
	struct liquidio_rx_ctl_context *ctx;
	struct octeon_soft_command *sc;
	union octnet_cmd *ncmd;
	int retval;

	if (oct->props[lio->ifidx].rx_on == start_stop)
		return;

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
					  16, ctx_size);
	if (!sc)	/* defensive: soft command allocation can fail */
		return;

	ncmd = (union octnet_cmd *)sc->virtdptr;
	ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr;

	WRITE_ONCE(ctx->cond, 0);
	ctx->octeon_id = lio_get_device_id(oct);
	init_waitqueue_head(&ctx->wc);

	ncmd->u64 = 0;
	ncmd->s.cmd = OCTNET_CMD_RX_CTL;
	ncmd->s.param1 = start_stop;

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_CMD, 0, 0, 0);

	sc->callback = rx_ctl_callback;
	sc->callback_arg = sc;
	sc->wait_time = 5000;

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
	} else {
		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived or timed-out.
		 */
		if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR)
			return;
		oct->props[lio->ifidx].rx_on = start_stop;
	}

	octeon_free_soft_command(oct, sc);
}

/**
 * \brief Destroy NIC device interface
 * @param oct octeon device
 * @param ifidx which interface to destroy
 *
 * Cleanup associated with each interface for an Octeon device when NIC
 * module is being unloaded or if initialization fails during load.
 */
static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
	struct net_device *netdev = oct->props[ifidx].netdev;
	struct napi_struct *napi, *n;
	struct lio *lio;

	if (!netdev) {
		dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
			__func__, ifidx);
		return;
	}

	lio = GET_LIO(netdev);

	dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
		liquidio_stop(netdev);

	if (oct->props[lio->ifidx].napi_enabled == 1) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_disable(napi);

		oct->props[lio->ifidx].napi_enabled = 0;

		oct->droq[0]->ops.poll_mode = 0;
	}

	/* Delete NAPI */
	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
		netif_napi_del(napi);

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
		unregister_netdev(netdev);

	cleanup_rx_oom_poll_fn(netdev);

	cleanup_link_status_change_wq(netdev);

	delete_glists(lio);

	free_netdev(netdev);

	oct->props[ifidx].gmxport = -1;

	oct->props[ifidx].netdev = NULL;
}

/**
 * \brief Stop complete NIC functionality
 * @param oct octeon device
 */
static int liquidio_stop_nic_module(struct octeon_device *oct)
{
	struct lio *lio;
	int i, j;

	dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
	if (!oct->ifcount) {
		dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
		return 1;
	}

	spin_lock_bh(&oct->cmd_resp_wqlock);
	oct->cmd_resp_state = OCT_DRV_OFFLINE;
	spin_unlock_bh(&oct->cmd_resp_wqlock);

	for (i = 0; i < oct->ifcount; i++) {
		lio = GET_LIO(oct->props[i].netdev);
		for (j = 0; j < oct->num_oqs; j++)
			octeon_unregister_droq_ops(oct,
						   lio->linfo.rxpciq[j].s.q_no);
	}

	for (i = 0; i < oct->ifcount; i++)
		liquidio_destroy_nic_device(oct, i);

	dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
	return 0;
}

/**
 * \brief Cleans up resources at unload time
 * @param pdev PCI device structure
 */
static void liquidio_vf_remove(struct pci_dev *pdev)
{
	struct octeon_device *oct_dev = pci_get_drvdata(pdev);

	dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");

	if (oct_dev->app_mode == CVM_DRV_NIC_APP)
		liquidio_stop_nic_module(oct_dev);

	/* Reset the octeon device and cleanup all memory allocated for
	 * the octeon device by driver.
	 */
	octeon_destroy_resources(oct_dev);

	dev_info(&oct_dev->pci_dev->dev, "Device removed\n");

	/* This octeon device has been removed. Update the global
	 * data structure to reflect this. Free the device structure.
	 */
	octeon_free_device_mem(oct_dev);
}

/**
 * \brief Sets up the PCI layer
 * @param oct octeon device
 */
static int octeon_pci_os_setup(struct octeon_device *oct)
{
#ifdef CONFIG_PCI_IOV
	/* setup PCI stuff first */
	if (!oct->pci_dev->physfn)
		octeon_pci_flr(oct);
#endif

	if (pci_enable_device(oct->pci_dev)) {
		dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
		return 1;
	}

	if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
		dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
		pci_disable_device(oct->pci_dev);
		return 1;
	}

	/* Enable PCI DMA Master. */
	pci_set_master(oct->pci_dev);

	return 0;
}

static int skb_iq(struct lio *lio, struct sk_buff *skb)
{
	int q = 0;

	if (netif_is_multiqueue(lio->netdev))
		q = skb->queue_mapping % lio->linfo.num_txpciq;

	return q;
}
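
/* skb_iq() maps an skb to the input queue it was sent on: queue_mapping
 * modulo the number of txpciq rings actually granted to this VF.
 */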

/**
 * \brief Check Tx queue state for a given network buffer
 * @param lio per-network private data
 * @param skb network buffer
 */
static int check_txq_state(struct lio *lio, struct sk_buff *skb)
{
	int q = 0, iq = 0;

	if (netif_is_multiqueue(lio->netdev)) {
		q = skb->queue_mapping;
		iq = lio->linfo.txpciq[q % lio->oct_dev->num_iqs].s.q_no;
	} else {
		iq = lio->txq;
		q = iq;
	}

	if (octnet_iq_is_full(lio->oct_dev, iq))
		return 0;

	if (__netif_subqueue_stopped(lio->netdev, q)) {
		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1);
		wake_q(lio->netdev, q);
	}

	return 1;
}
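
/* check_txq_state() runs on the TX completion path: once an IQ has drained
 * below full, any sub-queue that was stopped in liquidio_xmit() is woken.
 */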

/**
 * \brief Unmap and free network buffer
 * @param buf buffer
 */
static void free_netbuf(void *buf)
{
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct lio *lio;

	finfo = (struct octnet_buf_free_info *)buf;
	skb = finfo->skb;
	lio = finfo->lio;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
			 DMA_TO_DEVICE);

	check_txq_state(lio, skb);

	tx_buffer_free(skb);
}

/**
 * \brief Unmap and free gather buffer
 * @param buf buffer
 */
static void free_netsgbuf(void *buf)
{
	struct octnet_buf_free_info *finfo;
	struct octnic_gather *g;
	struct sk_buff *skb;
	int i, frags, iq;
	struct lio *lio;

	finfo = (struct octnet_buf_free_info *)buf;
	skb = finfo->skb;
	lio = finfo->lio;
	g = finfo->g;
	frags = skb_shinfo(skb)->nr_frags;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
			 g->sg[0].ptr[0], (skb->len - skb->data_len),
			 DMA_TO_DEVICE);

	i = 1;
	while (frags--) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		pci_unmap_page((lio->oct_dev)->pci_dev,
			       g->sg[(i >> 2)].ptr[(i & 3)],
			       frag->size, DMA_TO_DEVICE);
		i++;
	}

	iq = skb_iq(lio, skb);

	spin_lock(&lio->glist_lock[iq]);
	list_add_tail(&g->list, &lio->glist[iq]);
	spin_unlock(&lio->glist_lock[iq]);

	check_txq_state(lio, skb); /* mq support: sub-queue state check */

	tx_buffer_free(skb);
}

/**
 * \brief Unmap and free gather buffer with response
 * @param buf buffer
 */
static void free_netsgbuf_with_resp(void *buf)
{
	struct octnet_buf_free_info *finfo;
	struct octeon_soft_command *sc;
	struct octnic_gather *g;
	struct sk_buff *skb;
	int i, frags, iq;
	struct lio *lio;

	sc = (struct octeon_soft_command *)buf;
	skb = (struct sk_buff *)sc->callback_arg;
	finfo = (struct octnet_buf_free_info *)&skb->cb;

	lio = finfo->lio;
	g = finfo->g;
	frags = skb_shinfo(skb)->nr_frags;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
			 g->sg[0].ptr[0], (skb->len - skb->data_len),
			 DMA_TO_DEVICE);

	i = 1;
	while (frags--) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		pci_unmap_page((lio->oct_dev)->pci_dev,
			       g->sg[(i >> 2)].ptr[(i & 3)],
			       frag->size, DMA_TO_DEVICE);
		i++;
	}

	iq = skb_iq(lio, skb);

	spin_lock(&lio->glist_lock[iq]);
	list_add_tail(&g->list, &lio->glist[iq]);
	spin_unlock(&lio->glist_lock[iq]);

	/* Don't free the skb yet */

	check_txq_state(lio, skb);
}

/**
 * \brief Callback for if cfg
 * @param oct octeon device
 * @param status status of the request
 * @param buf pointer to the soft command
 */
static void if_cfg_callback(struct octeon_device *oct,
			    u32 status __attribute__((unused)), void *buf)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
	struct liquidio_if_cfg_context *ctx;
	struct liquidio_if_cfg_resp *resp;

	resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
	ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;

	oct = lio_get_device(ctx->octeon_id);
	if (resp->status)
		dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
			CVM_CAST64(resp->status));
	WRITE_ONCE(ctx->cond, 1);

	snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
		 resp->cfg_info.liquidio_firmware_version);

	/* This barrier is required to be sure that the response has been
	 * written fully before waking up the handler
	 */
	wmb();

	wake_up_interruptible(&ctx->wc);
}

/**
 * \brief Net device open for LiquidIO
 * @param netdev network device
 */
static int liquidio_open(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct napi_struct *napi, *n;

	if (!oct->props[lio->ifidx].napi_enabled) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_enable(napi);

		oct->props[lio->ifidx].napi_enabled = 1;

		oct->droq[0]->ops.poll_mode = 1;
	}

	ifstate_set(lio, LIO_IFSTATE_RUNNING);

	/* Ready for link status updates */
	lio->intf_open = 1;

	netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
	start_txq(netdev);

	/* tell Octeon to start forwarding packets to host */
	send_rx_ctrl_cmd(lio, 1);

	dev_info(&oct->pci_dev->dev, "%s interface is opened\n", netdev->name);

	return 0;
}

/**
 * \brief Net device stop for LiquidIO
 * @param netdev network device
 */
static int liquidio_stop(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct napi_struct *napi, *n;

	/* tell Octeon to stop forwarding packets to host */
	send_rx_ctrl_cmd(lio, 0);

	if (oct->props[lio->ifidx].napi_enabled) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_disable(napi);

		oct->props[lio->ifidx].napi_enabled = 0;

		oct->droq[0]->ops.poll_mode = 0;
	}

	netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n");

	/* Inform that netif carrier is down */
	lio->intf_open = 0;
	lio->linfo.link.s.link_up = 0;

	netif_carrier_off(netdev);
	lio->link_changes++;

	ifstate_reset(lio, LIO_IFSTATE_RUNNING);

	txqs_stop(netdev);

	dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);

	return 0;
}

/**
 * \brief Converts a mask based on net device flags
 * @param netdev network device
 *
 * This routine generates an octnet_ifflags mask from the net device flags
 * received from the core.
 */
static enum octnet_ifflags get_new_flags(struct net_device *netdev)
{
	enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;

	if (netdev->flags & IFF_PROMISC)
		f |= OCTNET_IFFLAG_PROMISC;

	if (netdev->flags & IFF_ALLMULTI)
		f |= OCTNET_IFFLAG_ALLMULTI;

	if (netdev->flags & IFF_MULTICAST) {
		f |= OCTNET_IFFLAG_MULTICAST;

		/* Accept all multicast addresses if there are more than we
		 * can handle
		 */
		if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
			f |= OCTNET_IFFLAG_ALLMULTI;
	}

	if (netdev->flags & IFF_BROADCAST)
		f |= OCTNET_IFFLAG_BROADCAST;

	return f;
}

static void liquidio_set_uc_list(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	struct netdev_hw_addr *ha;
	u64 *mac;

	if (lio->netdev_uc_count == netdev_uc_count(netdev))
		return;

	if (netdev_uc_count(netdev) > MAX_NCTRL_UDD) {
		dev_err(&oct->pci_dev->dev, "too many MAC addresses in netdev uc list\n");
		return;
	}

	lio->netdev_uc_count = netdev_uc_count(netdev);

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_UC_LIST;
	nctrl.ncmd.s.more = lio->netdev_uc_count;
	nctrl.ncmd.s.param1 = oct->vf_num;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	/* copy all the addresses into the udd */
	mac = &nctrl.udd[0];
	netdev_for_each_uc_addr(ha, netdev) {
		ether_addr_copy(((u8 *)mac) + 2, ha->addr);
		mac++;
	}

	octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
}

/**
 * \brief Net device set_multicast_list
 * @param netdev network device
 */
static void liquidio_set_mcast_list(struct net_device *netdev)
{
	int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	struct netdev_hw_addr *ha;
	u64 *mc;
	int ret;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	/* Create a ctrl pkt command to be sent to core app. */
	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
	nctrl.ncmd.s.param1 = get_new_flags(netdev);
	nctrl.ncmd.s.param2 = mc_count;
	nctrl.ncmd.s.more = mc_count;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	/* copy all the addresses into the udd */
	mc = &nctrl.udd[0];
	netdev_for_each_mc_addr(ha, netdev) {
		*mc = 0;
		ether_addr_copy(((u8 *)mc) + 2, ha->addr);
		/* no need to swap bytes */
		if (++mc > &nctrl.udd[mc_count])
			break;
	}

	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;

	/* Apparently, any activity in this call from the kernel has to
	 * be atomic. So we won't wait for response.
	 */
	nctrl.wait_time = 0;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
			ret);
	}

	liquidio_set_uc_list(netdev);
}

/**
 * \brief Net device set_mac_address
 * @param netdev network device
 * @param p pointer to sockaddr
 */
static int liquidio_set_mac(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = (struct sockaddr *)p;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	if (lio->linfo.macaddr_is_admin_asgnd)
		return -EPERM;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
	nctrl.ncmd.s.param1 = 0;
	nctrl.ncmd.s.more = 1;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
	nctrl.wait_time = 100;

	nctrl.udd[0] = 0;
	/* The MAC Address is presented in network byte order. */
	ether_addr_copy((u8 *)&nctrl.udd[0] + 2, addr->sa_data);

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
		return -ENOMEM;
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data);

	return 0;
}

/**
 * \brief Net device get_stats
 * @param netdev network device
 */
static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct net_device_stats *stats = &netdev->stats;
	u64 pkts = 0, drop = 0, bytes = 0;
	struct oct_droq_stats *oq_stats;
	struct oct_iq_stats *iq_stats;
	struct octeon_device *oct;
	int i, iq_no, oq_no;

	oct = lio->oct_dev;

	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
		return stats;

	for (i = 0; i < oct->num_iqs; i++) {
		iq_no = lio->linfo.txpciq[i].s.q_no;
		iq_stats = &oct->instr_queue[iq_no]->stats;
		pkts += iq_stats->tx_done;
		drop += iq_stats->tx_dropped;
		bytes += iq_stats->tx_tot_bytes;
	}

	stats->tx_packets = pkts;
	stats->tx_bytes = bytes;
	stats->tx_dropped = drop;

	pkts = 0;
	drop = 0;
	bytes = 0;

	for (i = 0; i < oct->num_oqs; i++) {
		oq_no = lio->linfo.rxpciq[i].s.q_no;
		oq_stats = &oct->droq[oq_no]->stats;
		pkts += oq_stats->rx_pkts_received;
		drop += (oq_stats->rx_dropped +
			 oq_stats->dropped_nodispatch +
			 oq_stats->dropped_toomany +
			 oq_stats->dropped_nomem);
		bytes += oq_stats->rx_bytes_received;
	}

	stats->rx_bytes = bytes;
	stats->rx_packets = pkts;
	stats->rx_dropped = drop;

	return stats;
}

/**
 * \brief Net device change_mtu
 * @param netdev network device
 * @param new_mtu new MTU value
 */
static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct octnic_ctrl_pkt nctrl;
	struct octeon_device *oct;
	struct lio *lio;
	int ret = 0;

	lio = GET_LIO(netdev);
	oct = lio->oct_dev;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU;
	nctrl.ncmd.s.param1 = new_mtu;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = LIO_CMD_WAIT_TM;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Failed to set MTU\n");
		return -EIO;
	}

	lio->mtu = new_mtu;

	return 0;
}

/**
 * \brief Handler for SIOCSHWTSTAMP ioctl
 * @param netdev network device
 * @param ifr interface request
 */
static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
{
	struct lio *lio = GET_LIO(netdev);
	struct hwtstamp_config conf;

	if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
		return -EFAULT;

	if (conf.flags)
		return -EINVAL;

	switch (conf.tx_type) {
	case HWTSTAMP_TX_ON:
	case HWTSTAMP_TX_OFF:
		break;
	default:
		return -ERANGE;
	}

	switch (conf.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		conf.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
		ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
	else
		ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);

	return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
}
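
/* The hardware timestamps either all received packets or none, so every
 * accepted PTP filter is reported back to userspace as HWTSTAMP_FILTER_ALL.
 */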

/**
 * \brief ioctl handler
 * @param netdev network device
 * @param ifr interface request
 * @param cmd command
 */
static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCSHWTSTAMP:
		return hwtstamp_ioctl(netdev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

static void handle_timestamp(struct octeon_device *oct, u32 status, void *buf)
{
	struct sk_buff *skb = (struct sk_buff *)buf;
	struct octnet_buf_free_info *finfo;
	struct oct_timestamp_resp *resp;
	struct octeon_soft_command *sc;
	struct lio *lio;

	finfo = (struct octnet_buf_free_info *)skb->cb;
	lio = finfo->lio;
	sc = finfo->sc;
	oct = lio->oct_dev;
	resp = (struct oct_timestamp_resp *)sc->virtrptr;

	if (status != OCTEON_REQUEST_DONE) {
		dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
			CVM_CAST64(status));
		resp->timestamp = 0;
	}

	octeon_swap_8B_data(&resp->timestamp, 1);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
		struct skb_shared_hwtstamps ts;
		u64 ns = resp->timestamp;

		netif_info(lio, tx_done, lio->netdev,
			   "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
			   skb, (unsigned long long)ns);
		ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
		skb_tstamp_tx(skb, &ts);
	}

	octeon_free_soft_command(oct, sc);
	tx_buffer_free(skb);
}

/**
 * \brief Send a data packet that will be timestamped
 * @param oct octeon device
 * @param ndata pointer to network data
 * @param finfo pointer to private network data
 * @param xmit_more more packets are coming
 */
static int send_nic_timestamp_pkt(struct octeon_device *oct,
				  struct octnic_data_pkt *ndata,
				  struct octnet_buf_free_info *finfo,
				  int xmit_more)
{
	struct octeon_soft_command *sc;
	int ring_doorbell;
	struct lio *lio;
	int retval;
	u32 len;

	lio = finfo->lio;

	sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
					    sizeof(struct oct_timestamp_resp));
	finfo->sc = sc;

	if (!sc) {
		dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
		return IQ_SEND_FAILED;
	}

	if (ndata->reqtype == REQTYPE_NORESP_NET)
		ndata->reqtype = REQTYPE_RESP_NET;
	else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
		ndata->reqtype = REQTYPE_RESP_NET_SG;

	sc->callback = handle_timestamp;
	sc->callback_arg = finfo->skb;
	sc->iq_no = ndata->q_no;

	len = (u32)((struct octeon_instr_ih3 *)(&sc->cmd.cmd3.ih3))->dlengsz;

	ring_doorbell = !xmit_more;

	retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
				     sc, len, ndata->reqtype);

	if (retval == IQ_SEND_FAILED) {
		dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
			retval);
		octeon_free_soft_command(oct, sc);
	} else {
		netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
	}

	return retval;
}
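
/* For a timestamp request, the already-built data command is re-issued as a
 * soft command so the firmware can attach a response carrying the TX
 * timestamp; handle_timestamp() then delivers it via skb_tstamp_tx().
 */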

/**
 * \brief Transmit network packets to the Octeon interface
 * @param skb skbuff struct to be passed to network layer
 * @param netdev pointer to network device
 * @returns whether the packet was transmitted to the device okay or not
 *          (NETDEV_TX_OK or NETDEV_TX_BUSY)
 */
static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct octnet_buf_free_info *finfo;
	union octnic_cmd_setup cmdsetup;
	struct octnic_data_pkt ndata;
	struct octeon_instr_irh *irh;
	struct oct_iq_stats *stats;
	struct octeon_device *oct;
	int q_idx = 0, iq_no = 0;
	union tx_info *tx_info;
	int xmit_more = 0;
	struct lio *lio;
	int status = 0;
	u64 dptr = 0;
	u32 tag = 0;
	int j;

	lio = GET_LIO(netdev);
	oct = lio->oct_dev;

	if (netif_is_multiqueue(netdev)) {
		q_idx = skb->queue_mapping;
		q_idx = (q_idx % (lio->linfo.num_txpciq));
		tag = q_idx;
		iq_no = lio->linfo.txpciq[q_idx].s.q_no;
	} else {
		iq_no = lio->txq;
	}

	stats = &oct->instr_queue[iq_no]->stats;

	/* Check for all conditions in which the current packet cannot be
	 * transmitted.
	 */
	if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
	    (!lio->linfo.link.s.link_up) || (skb->len <= 0)) {
		netif_info(lio, tx_err, lio->netdev, "Transmit failed link_status : %d\n",
			   lio->linfo.link.s.link_up);
		goto lio_xmit_failed;
	}

	/* Use space in skb->cb to store info used to unmap and
	 * free the buffers.
	 */
	finfo = (struct octnet_buf_free_info *)skb->cb;
	finfo->lio = lio;
	finfo->skb = skb;
	finfo->sc = NULL;

	/* Prepare the attributes for the data to be passed to OSI. */
	memset(&ndata, 0, sizeof(struct octnic_data_pkt));

	ndata.buf = finfo;

	ndata.q_no = iq_no;

	if (netif_is_multiqueue(netdev)) {
		if (octnet_iq_is_full(oct, ndata.q_no)) {
			/* defer sending if queue is full */
			netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
				   ndata.q_no);
			stats->tx_iq_busy++;
			return NETDEV_TX_BUSY;
		}
	} else {
		if (octnet_iq_is_full(oct, lio->txq)) {
			/* defer sending if queue is full */
			stats->tx_iq_busy++;
			netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
				   ndata.q_no);
			return NETDEV_TX_BUSY;
		}
	}

	ndata.datasize = skb->len;

	cmdsetup.u64 = 0;
	cmdsetup.s.iq_no = iq_no;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			cmdsetup.s.tnl_csum = 1;
			stats->tx_vxlan++;
		} else {
			cmdsetup.s.transport_csum = 1;
		}
	}
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		cmdsetup.s.timestamp = 1;
	}

	if (!skb_shinfo(skb)->nr_frags) {
		cmdsetup.s.u.datasize = skb->len;
		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);

		dptr = dma_map_single(&oct->pci_dev->dev,
				      skb->data,
				      skb->len,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
				__func__);
			return NETDEV_TX_BUSY;
		}

		ndata.cmd.cmd3.dptr = dptr;
		finfo->dptr = dptr;
		ndata.reqtype = REQTYPE_NORESP_NET;

	} else {
		struct skb_frag_struct *frag;
		struct octnic_gather *g;
		int i, frags;

		spin_lock(&lio->glist_lock[q_idx]);
		g = (struct octnic_gather *)list_delete_head(
		    &lio->glist[q_idx]);
		spin_unlock(&lio->glist_lock[q_idx]);

		if (!g) {
			netif_info(lio, tx_err, lio->netdev,
				   "Transmit scatter gather: glist null!\n");
			goto lio_xmit_failed;
		}

		cmdsetup.s.gather = 1;
		cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);

		memset(g->sg, 0, g->sg_size);

		g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
						 skb->data,
						 (skb->len - skb->data_len),
						 DMA_TO_DEVICE);
		if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
				__func__);
			return NETDEV_TX_BUSY;
		}
		add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);

		frags = skb_shinfo(skb)->nr_frags;
		i = 1;
		while (frags--) {
			frag = &skb_shinfo(skb)->frags[i - 1];

			g->sg[(i >> 2)].ptr[(i & 3)] =
				dma_map_page(&oct->pci_dev->dev,
					     frag->page.p,
					     frag->page_offset,
					     frag->size,
					     DMA_TO_DEVICE);
			if (dma_mapping_error(&oct->pci_dev->dev,
					      g->sg[i >> 2].ptr[i & 3])) {
				dma_unmap_single(&oct->pci_dev->dev,
						 g->sg[0].ptr[0],
						 skb->len - skb->data_len,
						 DMA_TO_DEVICE);
				for (j = 1; j < i; j++) {
					frag = &skb_shinfo(skb)->frags[j - 1];
					dma_unmap_page(&oct->pci_dev->dev,
						       g->sg[j >> 2].ptr[j & 3],
						       frag->size,
						       DMA_TO_DEVICE);
				}
				dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
					__func__);
				return NETDEV_TX_BUSY;
			}

			add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
			i++;
		}

		dptr = g->sg_dma_ptr;

		ndata.cmd.cmd3.dptr = dptr;
		finfo->dptr = dptr;
		finfo->g = g;

		ndata.reqtype = REQTYPE_NORESP_NET_SG;
	}

	irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
	tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];

	if (skb_shinfo(skb)->gso_size) {
		tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
		tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
	}

	/* HW insert VLAN tag */
	if (skb_vlan_tag_present(skb)) {
		irh->priority = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT;
		irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
	}

	xmit_more = skb->xmit_more;

	if (unlikely(cmdsetup.s.timestamp))
		status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
	else
		status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
	if (status == IQ_SEND_FAILED)
		goto lio_xmit_failed;

	netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");

	if (status == IQ_SEND_STOP) {
		dev_err(&oct->pci_dev->dev, "Rcvd IQ_SEND_STOP signal; stopping IQ-%d\n",
			iq_no);
		stop_q(netdev, q_idx);
	}

	netif_trans_update(netdev);

	if (tx_info->s.gso_segs)
		stats->tx_done += tx_info->s.gso_segs;
	else
		stats->tx_done++;
	stats->tx_tot_bytes += ndata.datasize;

	return NETDEV_TX_OK;

lio_xmit_failed:
	stats->tx_dropped++;
	netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
		   iq_no, stats->tx_dropped);
	if (dptr)
		dma_unmap_single(&oct->pci_dev->dev, dptr,
				 ndata.datasize, DMA_TO_DEVICE);

	octeon_ring_doorbell_locked(oct, iq_no);

	tx_buffer_free(skb);
	return NETDEV_TX_OK;
}

/**
 * \brief Network device Tx timeout
 * @param netdev pointer to network device
 */
static void liquidio_tx_timeout(struct net_device *netdev)
{
	struct lio *lio;

	lio = GET_LIO(netdev);

	netif_info(lio, tx_err, lio->netdev,
		   "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
		   netdev->stats.tx_dropped);
	netif_trans_update(netdev);
	txqs_wake(netdev);
}

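/* The VLAN filter add below is one of the few VF control commands that
 * waits for the firmware's response code: the PF may refuse the filter, and
 * that failure must be propagated back through ndo_vlan_rx_add_vid(). The
 * completion is presumably signalled from liquidio_link_ctrl_cmd_completion
 * once the response (or a timeout) arrives.
 */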
static int
liquidio_vlan_rx_add_vid(struct net_device *netdev,
			 __be16 proto __attribute__((unused)), u16 vid)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	struct completion compl;
	u16 response_code;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
	nctrl.ncmd.s.param1 = vid;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
	init_completion(&compl);
	nctrl.completion = &compl;
	nctrl.response_code = &response_code;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
			ret);
		return -EIO;
	}

	if (!wait_for_completion_timeout(&compl,
					 msecs_to_jiffies(nctrl.wait_time)))
		return -EPERM;

	if (READ_ONCE(response_code))
		return -EPERM;

	return 0;
}

static int
liquidio_vlan_rx_kill_vid(struct net_device *netdev,
			  __be16 proto __attribute__((unused)), u16 vid)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
	nctrl.ncmd.s.param1 = vid;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
			ret);
	}
	return ret;
}

/**
 * \brief Sending command to enable/disable RX checksum offload
 * @param netdev pointer to network device
 * @param command OCTNET_CMD_TNL_RX_CSUM_CTL
 * @param rx_cmd OCTNET_CMD_RXCSUM_ENABLE/OCTNET_CMD_RXCSUM_DISABLE
 * @returns SUCCESS or FAILURE
 */
static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
				       u8 rx_cmd)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = command;
	nctrl.ncmd.s.param1 = rx_cmd;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "DEVFLAGS RXCSUM change failed in core (ret:0x%x)\n",
			ret);
	}
	return ret;
}

/**
 * \brief Sending command to add/delete VxLAN UDP port to firmware
 * @param netdev pointer to network device
 * @param command OCTNET_CMD_VXLAN_PORT_CONFIG
 * @param vxlan_port VxLAN port to be added or deleted
 * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD,
 *                      OCTNET_CMD_VXLAN_PORT_DEL
 * @returns SUCCESS or FAILURE
 */
static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
				       u16 vxlan_port, u8 vxlan_cmd_bit)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = command;
	nctrl.ncmd.s.more = vxlan_cmd_bit;
	nctrl.ncmd.s.param1 = vxlan_port;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev,
			"DEVFLAGS VxLAN port add/delete failed in core (ret : 0x%x)\n",
			ret);
	}
	return ret;
}

/**
 * \brief Net device fix features
 * @param netdev pointer to network device
 * @param request features requested
 * @returns updated features list
 */
static netdev_features_t liquidio_fix_features(struct net_device *netdev,
					       netdev_features_t request)
{
	struct lio *lio = netdev_priv(netdev);

	if ((request & NETIF_F_RXCSUM) &&
	    !(lio->dev_capability & NETIF_F_RXCSUM))
		request &= ~NETIF_F_RXCSUM;

	if ((request & NETIF_F_HW_CSUM) &&
	    !(lio->dev_capability & NETIF_F_HW_CSUM))
		request &= ~NETIF_F_HW_CSUM;

	if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
		request &= ~NETIF_F_TSO;

	if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
		request &= ~NETIF_F_TSO6;

	if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
		request &= ~NETIF_F_LRO;

	/* Disable LRO if RXCSUM is off */
	if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
	    (lio->dev_capability & NETIF_F_LRO))
		request &= ~NETIF_F_LRO;

	return request;
}

/**
 * \brief Net device set features
 * @param netdev pointer to network device
 * @param features features to enable/disable
 */
static int liquidio_set_features(struct net_device *netdev,
				 netdev_features_t features)
{
	struct lio *lio = netdev_priv(netdev);

	if (!((netdev->features ^ features) & NETIF_F_LRO))
		return 0;

	if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
		liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
	else if (!(features & NETIF_F_LRO) &&
		 (lio->dev_capability & NETIF_F_LRO))
		liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
	if (!(netdev->features & NETIF_F_RXCSUM) &&
	    (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
	    (features & NETIF_F_RXCSUM))
		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
					    OCTNET_CMD_RXCSUM_ENABLE);
	else if ((netdev->features & NETIF_F_RXCSUM) &&
		 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
		 !(features & NETIF_F_RXCSUM))
		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
					    OCTNET_CMD_RXCSUM_DISABLE);

	return 0;
}

static void liquidio_add_vxlan_port(struct net_device *netdev,
				    struct udp_tunnel_info *ti)
{
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	liquidio_vxlan_port_command(netdev,
				    OCTNET_CMD_VXLAN_PORT_CONFIG,
				    htons(ti->port),
				    OCTNET_CMD_VXLAN_PORT_ADD);
}

static void liquidio_del_vxlan_port(struct net_device *netdev,
				    struct udp_tunnel_info *ti)
{
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	liquidio_vxlan_port_command(netdev,
				    OCTNET_CMD_VXLAN_PORT_CONFIG,
				    htons(ti->port),
				    OCTNET_CMD_VXLAN_PORT_DEL);
}

static const struct net_device_ops lionetdevops = {
	.ndo_open		= liquidio_open,
	.ndo_stop		= liquidio_stop,
	.ndo_start_xmit		= liquidio_xmit,
	.ndo_get_stats		= liquidio_get_stats,
	.ndo_set_mac_address	= liquidio_set_mac,
	.ndo_set_rx_mode	= liquidio_set_mcast_list,
	.ndo_tx_timeout		= liquidio_tx_timeout,
	.ndo_vlan_rx_add_vid	= liquidio_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= liquidio_vlan_rx_kill_vid,
	.ndo_change_mtu		= liquidio_change_mtu,
	.ndo_do_ioctl		= liquidio_ioctl,
	.ndo_fix_features	= liquidio_fix_features,
	.ndo_set_features	= liquidio_set_features,
	.ndo_udp_tunnel_add	= liquidio_add_vxlan_port,
	.ndo_udp_tunnel_del	= liquidio_del_vxlan_port,
};

static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
{
	struct octeon_device *oct = (struct octeon_device *)buf;
	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
	union oct_link_status *ls;
	int gmxport = 0;
	int i;

	if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
		dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
			recv_pkt->buffer_size[0],
			recv_pkt->rh.r_nic_info.gmxport);
		goto nic_info_err;
	}

	gmxport = recv_pkt->rh.r_nic_info.gmxport;
	ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
		OCT_DROQ_INFO_SIZE);

	octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);

	for (i = 0; i < oct->ifcount; i++) {
		if (oct->props[i].gmxport == gmxport) {
			update_link_status(oct->props[i].netdev, ls);
			break;
		}
	}

nic_info_err:
	for (i = 0; i < recv_pkt->buffer_count; i++)
		recv_buffer_free(recv_pkt->buffer_ptr[i]);
	octeon_free_recv_info(recv_info);
	return 0;
}

/**
 * \brief Setup network interfaces
 * @param octeon_dev octeon device
 *
 * Called during init time for each device. It assumes the NIC
 * is already up and running. The link information for each
 * interface is passed in link_info.
 */
static int setup_nic_devices(struct octeon_device *octeon_dev)
{
	int retval, num_iqueues, num_oqueues;
	struct liquidio_if_cfg_context *ctx;
	u32 resp_size, ctx_size, data_size;
	struct liquidio_if_cfg_resp *resp;
	struct octeon_soft_command *sc;
	union oct_nic_if_cfg if_cfg;
	struct octdev_props *props;
	struct net_device *netdev;
	struct lio_version *vdata;
	struct lio *lio = NULL;
	u8 mac[ETH_ALEN], i, j;
	u32 ifidx_or_pfnum;

	ifidx_or_pfnum = octeon_dev->pf_num;

	/* This is to handle link status changes */
	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, OPCODE_NIC_INFO,
				    lio_nic_info, octeon_dev);

	/* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
	 * They are handled directly.
	 */
	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
					free_netbuf);

	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
					free_netsgbuf);

	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
					free_netsgbuf_with_resp);

	for (i = 0; i < octeon_dev->ifcount; i++) {
		resp_size = sizeof(struct liquidio_if_cfg_resp);
		ctx_size = sizeof(struct liquidio_if_cfg_context);
		data_size = sizeof(struct lio_version);
		sc = (struct octeon_soft_command *)
			octeon_alloc_soft_command(octeon_dev, data_size,
						  resp_size, ctx_size);
		if (!sc)	/* defensive: soft command allocation can fail */
			return -ENOMEM;
		resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
		ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
		vdata = (struct lio_version *)sc->virtdptr;

		*((u64 *)vdata) = 0;
		vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
		vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
		vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);

		WRITE_ONCE(ctx->cond, 0);
		ctx->octeon_id = lio_get_device_id(octeon_dev);
		init_waitqueue_head(&ctx->wc);

		if_cfg.u64 = 0;

		if_cfg.s.num_iqueues = octeon_dev->sriov_info.rings_per_vf;
		if_cfg.s.num_oqueues = octeon_dev->sriov_info.rings_per_vf;
		if_cfg.s.base_queue = 0;

		sc->iq_no = 0;

		octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
					    OPCODE_NIC_IF_CFG, 0, if_cfg.u64,
					    0);

		sc->callback = if_cfg_callback;
		sc->callback_arg = sc;
		sc->wait_time = 5000;

		retval = octeon_send_soft_command(octeon_dev, sc);
		if (retval == IQ_SEND_FAILED) {
			dev_err(&octeon_dev->pci_dev->dev,
				"iq/oq config failed status: %x\n", retval);

			goto setup_nic_dev_fail;
		}

		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived or timed-out.
		 */
		if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
			dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n");
			goto setup_nic_wait_intr;
		}

		retval = resp->status;
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
			goto setup_nic_dev_fail;
		}

		octeon_swap_8B_data((u64 *)(&resp->cfg_info),
				    (sizeof(struct liquidio_if_cfg_info)) >> 3);

		num_iqueues = hweight64(resp->cfg_info.iqmask);
		num_oqueues = hweight64(resp->cfg_info.oqmask);

		if (!(num_iqueues) || !(num_oqueues)) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
				resp->cfg_info.iqmask, resp->cfg_info.oqmask);
			goto setup_nic_dev_fail;
		}
		dev_dbg(&octeon_dev->pci_dev->dev,
			"interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
			i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
			num_iqueues, num_oqueues);

		netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);

		if (!netdev) {
			dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
			goto setup_nic_dev_fail;
		}

		SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);

		/* Associate the routines that will handle different
		 * netdev tasks.
		 */
		netdev->netdev_ops = &lionetdevops;

		lio = GET_LIO(netdev);

		memset(lio, 0, sizeof(struct lio));

		lio->ifidx = ifidx_or_pfnum;

		props = &octeon_dev->props[i];
		props->gmxport = resp->cfg_info.linfo.gmxport;
		props->netdev = netdev;

		lio->linfo.num_rxpciq = num_oqueues;
		lio->linfo.num_txpciq = num_iqueues;

		for (j = 0; j < num_oqueues; j++) {
			lio->linfo.rxpciq[j].u64 =
			    resp->cfg_info.linfo.rxpciq[j].u64;
		}
		for (j = 0; j < num_iqueues; j++) {
			lio->linfo.txpciq[j].u64 =
			    resp->cfg_info.linfo.txpciq[j].u64;
		}

		lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
		lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
		lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
		lio->linfo.macaddr_is_admin_asgnd =
			resp->cfg_info.linfo.macaddr_is_admin_asgnd;

		lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

		lio->dev_capability = NETIF_F_HIGHDMA
				      | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
				      | NETIF_F_SG | NETIF_F_RXCSUM
				      | NETIF_F_TSO | NETIF_F_TSO6
				      | NETIF_F_GRO
				      | NETIF_F_LRO;
		netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);

		/* Copy of transmit encapsulation capabilities:
		 * TSO, TSO6, Checksums for this device
		 */
		lio->enc_dev_capability = NETIF_F_IP_CSUM
					  | NETIF_F_IPV6_CSUM
					  | NETIF_F_GSO_UDP_TUNNEL
					  | NETIF_F_HW_CSUM | NETIF_F_SG
					  | NETIF_F_RXCSUM
					  | NETIF_F_TSO | NETIF_F_TSO6
					  | NETIF_F_LRO;

		netdev->hw_enc_features =
		    (lio->enc_dev_capability & ~NETIF_F_LRO);
		netdev->vlan_features = lio->dev_capability;

		lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
				       NETIF_F_HW_VLAN_CTAG_RX |
				       NETIF_F_HW_VLAN_CTAG_TX;

		netdev->features = (lio->dev_capability & ~NETIF_F_LRO);

		netdev->hw_features = lio->dev_capability;

		/* MTU range: 68 - 16000 */
		netdev->min_mtu = LIO_MIN_MTU_SIZE;
		netdev->max_mtu = LIO_MAX_MTU_SIZE;

		/* Point to the properties for octeon device to which this
		 * interface belongs.
		 */
		lio->oct_dev = octeon_dev;
		lio->octprops = props;
		lio->netdev = netdev;

		dev_dbg(&octeon_dev->pci_dev->dev,
2487 "if%d gmx: %d hw_addr: 0x%llx\n", i,
2488 lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
2489
2490
2491 octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
2492 for (j = 0; j < ETH_ALEN; j++)
2493 mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
2494
2495
2496 ether_addr_copy(netdev->dev_addr, mac);
2497
2498 if (liquidio_setup_io_queues(octeon_dev, i,
2499 lio->linfo.num_txpciq,
2500 lio->linfo.num_rxpciq)) {
2501 dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
2502 goto setup_nic_dev_fail;
2503 }
2504
2505 ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
2506
2507
2508
2509
2510 octeon_dev->fn_list.enable_interrupt(octeon_dev,
2511 OCTEON_ALL_INTR);
2512
2513
2514
2515
2516 lio->txq = lio->linfo.txpciq[0].s.q_no;
2517 lio->rxq = lio->linfo.rxpciq[0].s.q_no;
2518
2519 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
2520 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
2521
2522 if (setup_glists(lio, num_iqueues)) {
2523 dev_err(&octeon_dev->pci_dev->dev,
2524 "Gather list allocation failed\n");
2525 goto setup_nic_dev_fail;
2526 }
2527
2528
2529 liquidio_set_ethtool_ops(netdev);
2530 if (lio->oct_dev->chip_id == OCTEON_CN23XX_VF_VID)
2531 octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
2532 else
2533 octeon_dev->priv_flags = 0x0;
2534
2535 if (netdev->features & NETIF_F_LRO)
2536 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
2537 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2538
2539 if (setup_link_status_change_wq(netdev))
2540 goto setup_nic_dev_fail;
2541
2542 if (setup_rx_oom_poll_fn(netdev))
2543 goto setup_nic_dev_fail;
2544
2545
2546 if (register_netdev(netdev)) {
2547 dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
2548 goto setup_nic_dev_fail;
2549 }
2550
2551 dev_dbg(&octeon_dev->pci_dev->dev,
2552 "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
2553 i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
2554 netif_carrier_off(netdev);
2555 lio->link_changes++;
2556
2557 ifstate_set(lio, LIO_IFSTATE_REGISTERED);
2558
2559
2560
2561
2562
2563 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
2564 OCTNET_CMD_RXCSUM_ENABLE);
2565 liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
2566 OCTNET_CMD_TXCSUM_ENABLE);
2567
2568 dev_dbg(&octeon_dev->pci_dev->dev,
2569 "NIC ifidx:%d Setup successful\n", i);
2570
2571 octeon_free_soft_command(octeon_dev, sc);
2572 }
2573
2574 return 0;
2575
2576setup_nic_dev_fail:
2577
2578 octeon_free_soft_command(octeon_dev, sc);
2579
2580setup_nic_wait_intr:
2581
2582 while (i--) {
2583 dev_err(&octeon_dev->pci_dev->dev,
2584 "NIC ifidx:%d Setup failed\n", i);
2585 liquidio_destroy_nic_device(octeon_dev, i);
2586 }
2587 return -ENODEV;
2588}
2589
2590
2591
2592
2593
2594
2595
2596
2597static int liquidio_init_nic_module(struct octeon_device *oct)
2598{
2599 int num_nic_ports = 1;
2600 int i, retval = 0;
2601
2602 dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
2603
2604
2605
2606
2607 oct->ifcount = num_nic_ports;
2608 memset(oct->props, 0,
2609 sizeof(struct octdev_props) * num_nic_ports);
2610
2611 for (i = 0; i < MAX_OCTEON_LINKS; i++)
2612 oct->props[i].gmxport = -1;
2613
2614 retval = setup_nic_devices(oct);
2615 if (retval) {
2616 dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
2617 goto octnet_init_failure;
2618 }
2619
2620 dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
2621
2622 return retval;
2623
2624octnet_init_failure:
2625
2626 oct->ifcount = 0;
2627
2628 return retval;
2629}
2630
2631
2632
2633
2634
2635static int octeon_device_init(struct octeon_device *oct)
2636{
2637 u32 rev_id;
2638 int j;
2639
2640 atomic_set(&oct->status, OCT_DEV_BEGIN_STATE);
2641
2642
2643
2644
2645 if (octeon_pci_os_setup(oct))
2646 return 1;
2647 atomic_set(&oct->status, OCT_DEV_PCI_ENABLE_DONE);
2648
2649 oct->chip_id = OCTEON_CN23XX_VF_VID;
2650 pci_read_config_dword(oct->pci_dev, 8, &rev_id);
2651 oct->rev_id = rev_id & 0xff;
2652
2653 if (cn23xx_setup_octeon_vf_device(oct))
2654 return 1;
2655
2656 atomic_set(&oct->status, OCT_DEV_PCI_MAP_DONE);
2657
2658 oct->app_mode = CVM_DRV_NIC_APP;
2659
2660
2661
2662
2663 if (octeon_init_dispatch_list(oct))
2664 return 1;
2665
2666 atomic_set(&oct->status, OCT_DEV_DISPATCH_INIT_DONE);
2667
2668 if (octeon_set_io_queues_off(oct)) {
2669 dev_err(&oct->pci_dev->dev, "setting io queues off failed\n");
2670 return 1;
2671 }
2672
2673 if (oct->fn_list.setup_device_regs(oct)) {
2674 dev_err(&oct->pci_dev->dev, "device registers configuration failed\n");
2675 return 1;
2676 }
2677
2678
2679 if (octeon_setup_sc_buffer_pool(oct)) {
2680 dev_err(&oct->pci_dev->dev, "sc buffer pool allocation failed\n");
2681 return 1;
2682 }
2683 atomic_set(&oct->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
2684
2685
2686 if (octeon_setup_instr_queues(oct)) {
2687 dev_err(&oct->pci_dev->dev, "instruction queue initialization failed\n");
2688 return 1;
2689 }
2690 atomic_set(&oct->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
2691
2692
2693
2694
2695 if (octeon_setup_response_list(oct)) {
2696 dev_err(&oct->pci_dev->dev, "Response list allocation failed\n");
2697 return 1;
2698 }
2699 atomic_set(&oct->status, OCT_DEV_RESP_LIST_INIT_DONE);
2700
2701 if (octeon_setup_output_queues(oct)) {
2702 dev_err(&oct->pci_dev->dev, "Output queue initialization failed\n");
2703 return 1;
2704 }
2705 atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);
2706
2707 if (oct->fn_list.setup_mbox(oct)) {
2708 dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n");
2709 return 1;
2710 }
2711 atomic_set(&oct->status, OCT_DEV_MBOX_SETUP_DONE);
2712
2713 if (octeon_allocate_ioq_vector(oct)) {
2714 dev_err(&oct->pci_dev->dev, "ioq vector allocation failed\n");
2715 return 1;
2716 }
2717 atomic_set(&oct->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
2718
2719 dev_info(&oct->pci_dev->dev, "OCTEON_CN23XX VF Version: %s, %d ioqs\n",
2720 LIQUIDIO_VERSION, oct->sriov_info.rings_per_vf);
2721
2722
2723 if (octeon_setup_interrupt(oct, oct->sriov_info.rings_per_vf))
2724 return 1;
2725
2726 atomic_set(&oct->status, OCT_DEV_INTR_SET_DONE);
2727
2728
2729
2730
2731
2732
2733
2734
2735
2736 oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
2737
2738 if (cn23xx_octeon_pfvf_handshake(oct))
2739 return 1;
2740
2741
2742
2743
2744
2745
2746 oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
2747
2748
2749
2750 if (oct->fn_list.enable_io_queues(oct)) {
2751 dev_err(&oct->pci_dev->dev, "enabling io queues failed\n");
2752 return 1;
2753 }
2754
2755 atomic_set(&oct->status, OCT_DEV_IO_QUEUES_DONE);
2756
2757 atomic_set(&oct->status, OCT_DEV_HOST_OK);
2758
2759
2760
2761
2762 for (j = 0; j < oct->num_oqs; j++)
2763 writel(oct->droq[j]->max_count, oct->droq[j]->pkts_credit_reg);
2764
2765
2766
2767 atomic_set(&oct->status, OCT_DEV_CORE_OK);
2768
2769 atomic_set(&oct->status, OCT_DEV_RUNNING);
2770
2771 if (liquidio_init_nic_module(oct))
2772 return 1;
2773
2774 return 0;
2775}
2776
2777static int __init liquidio_vf_init(void)
2778{
2779 octeon_init_device_list(0);
2780 return pci_register_driver(&liquidio_vf_pci_driver);
2781}
2782
2783static void __exit liquidio_vf_exit(void)
2784{
2785 pci_unregister_driver(&liquidio_vf_pci_driver);
2786
2787 pr_info("LiquidIO_VF network module is now unloaded\n");
2788}
2789
2790module_init(liquidio_vf_init);
2791module_exit(liquidio_vf_exit);
2792