// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX2 NIC Physical Function ethernet driver
 *
 * Copyright (C) Marvell International Ltd.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/of.h>
#include <linux/if_vlan.h>
#include <linux/iommu.h>
#include <net/ip.h>

#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_txrx.h"
#include "otx2_struct.h"

#define DRV_NAME	"octeontx2-nicpf"
#define DRV_STRING	"Marvell OcteonTX2 NIC Physical Function Driver"
#define DRV_VERSION	"1.0"

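/* Supported devices */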
static const struct pci_device_id otx2_pf_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_PF) },
	{ 0, }
};

MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, otx2_pf_id_table);

enum {
	TYPE_PFAF,
	TYPE_PFVF,
};

static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
{
	bool if_up = netif_running(netdev);
	int err = 0;

	if (if_up)
		otx2_stop(netdev);

	netdev_info(netdev, "Changing MTU from %d to %d\n",
		    netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (if_up)
		err = otx2_open(netdev);

	return err;
}

static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
			    int first, int mdevs, u64 intr, int type)
{
	struct otx2_mbox_dev *mdev;
	struct otx2_mbox *mbox;
	struct mbox_hdr *hdr;
	int i;

	for (i = first; i < mdevs; i++) {
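		/* Skip devices with no pending mbox interrupt */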
		if (!(intr & BIT_ULL(i - first)))
			continue;

		mbox = &mw->mbox;
		mdev = &mbox->dev[i];
		if (type == TYPE_PFAF)
			otx2_sync_mbox_bbuf(mbox, i);
		hdr = mdev->mbase + mbox->rx_start;
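		/* hdr->num_msgs is cleared right here, in interrupt context,
		 * so that it holds a correct value the next time the interrupt
		 * fires; the counts are stashed in mw[i].num_msgs and
		 * mw[i].up_num_msgs for the workqueue handlers to consume.
		 */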
		if (hdr->num_msgs) {
			mw[i].num_msgs = hdr->num_msgs;
			hdr->num_msgs = 0;
			if (type == TYPE_PFAF)
				memset(mbox->hwbase + mbox->rx_start, 0,
				       ALIGN(sizeof(struct mbox_hdr),
					     sizeof(u64)));

			queue_work(mbox_wq, &mw[i].mbox_wrk);
		}

		mbox = &mw->mbox_up;
		mdev = &mbox->dev[i];
		if (type == TYPE_PFAF)
			otx2_sync_mbox_bbuf(mbox, i);
		hdr = mdev->mbase + mbox->rx_start;
		if (hdr->num_msgs) {
			mw[i].up_num_msgs = hdr->num_msgs;
			hdr->num_msgs = 0;
			if (type == TYPE_PFAF)
				memset(mbox->hwbase + mbox->rx_start, 0,
				       ALIGN(sizeof(struct mbox_hdr),
					     sizeof(u64)));

			queue_work(mbox_wq, &mw[i].mbox_up_wrk);
		}
	}
}

static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
				       struct mbox_msghdr *msg)
{
	if (msg->id >= MBOX_MSG_MAX) {
		dev_err(pf->dev,
			"Mbox msg with unknown ID 0x%x\n", msg->id);
		return;
	}

	if (msg->sig != OTX2_MBOX_RSP_SIG) {
		dev_err(pf->dev,
			"Mbox msg with wrong signature %x, ID 0x%x\n",
			msg->sig, msg->id);
		return;
	}

	switch (msg->id) {
	case MBOX_MSG_READY:
		pf->pcifunc = msg->pcifunc;
		break;
	case MBOX_MSG_MSIX_OFFSET:
		mbox_handler_msix_offset(pf, (struct msix_offset_rsp *)msg);
		break;
	case MBOX_MSG_NPA_LF_ALLOC:
		mbox_handler_npa_lf_alloc(pf, (struct npa_lf_alloc_rsp *)msg);
		break;
	case MBOX_MSG_NIX_LF_ALLOC:
		mbox_handler_nix_lf_alloc(pf, (struct nix_lf_alloc_rsp *)msg);
		break;
	case MBOX_MSG_NIX_TXSCH_ALLOC:
		mbox_handler_nix_txsch_alloc(pf,
					     (struct nix_txsch_alloc_rsp *)msg);
		break;
	case MBOX_MSG_CGX_STATS:
		mbox_handler_cgx_stats(pf, (struct cgx_stats_rsp *)msg);
		break;
	default:
		if (msg->rc)
			dev_err(pf->dev,
				"Mbox msg response has err %d, ID 0x%x\n",
				msg->rc, msg->id);
		break;
	}
}

static void otx2_pfaf_mbox_handler(struct work_struct *work)
{
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct otx2_mbox *mbox;
	struct mbox *af_mbox;
	struct otx2_nic *pf;
	int offset, id;

	af_mbox = container_of(work, struct mbox, mbox_wrk);
	mbox = &af_mbox->mbox;
	mdev = &mbox->dev[0];
	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);

	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
	pf = af_mbox->pfvf;

	for (id = 0; id < af_mbox->num_msgs; id++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
		otx2_process_pfaf_mbox_msg(pf, msg);
		offset = mbox->rx_start + msg->next_msgoff;
		mdev->msgs_acked++;
	}

	otx2_mbox_reset(mbox, 0);
}

static void otx2_handle_link_event(struct otx2_nic *pf)
{
	struct cgx_link_user_info *linfo = &pf->linfo;
	struct net_device *netdev = pf->netdev;

	pr_info("%s NIC Link is %s %d Mbps %s duplex\n", netdev->name,
		linfo->link_up ? "UP" : "DOWN", linfo->speed,
		linfo->full_duplex ? "Full" : "Half");
	if (linfo->link_up) {
		netif_carrier_on(netdev);
		netif_tx_start_all_queues(netdev);
	} else {
		netif_tx_stop_all_queues(netdev);
		netif_carrier_off(netdev);
	}
}

int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf,
					struct cgx_link_info_msg *msg,
					struct msg_rsp *rsp)
{
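	/* Copy the link info sent by AF */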
	pf->linfo = msg->link_info;

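	/* Ignore the event if the interface is not yet up */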
	if (pf->flags & OTX2_FLAG_INTF_DOWN)
		return 0;

	otx2_handle_link_event(pf);
	return 0;
}

static int otx2_process_mbox_msg_up(struct otx2_nic *pf,
				    struct mbox_msghdr *req)
{
	if (req->sig != OTX2_MBOX_REQ_SIG) {
		otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);
		return -ENODEV;
	}

	switch (req->id) {
#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
	case _id: {							\
		struct _rsp_type *rsp;					\
		int err;						\
									\
		rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(		\
			&pf->mbox.mbox_up, 0,				\
			sizeof(struct _rsp_type));			\
		if (!rsp)						\
			return -ENOMEM;					\
									\
		rsp->hdr.id = _id;					\
		rsp->hdr.sig = OTX2_MBOX_RSP_SIG;			\
		rsp->hdr.pcifunc = 0;					\
		rsp->hdr.rc = 0;					\
									\
		err = otx2_mbox_up_handler_ ## _fn_name(		\
			pf, (struct _req_type *)req, rsp);		\
		return err;						\
	}
MBOX_UP_CGX_MESSAGES
#undef M
		break;
	default:
		otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);
		return -ENODEV;
	}
	return 0;
}

static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
{
	struct mbox *af_mbox = container_of(work, struct mbox, mbox_up_wrk);
	struct otx2_mbox *mbox = &af_mbox->mbox_up;
	struct otx2_mbox_dev *mdev = &mbox->dev[0];
	struct otx2_nic *pf = af_mbox->pfvf;
	int offset, id, devid = 0;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;

	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);

	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < af_mbox->up_num_msgs; id++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + offset);

		devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;

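		/* Process only messages addressed to this PF (devid 0),
		 * skip those meant for VFs.
		 */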
		if (!devid)
			otx2_process_mbox_msg_up(pf, msg);
		offset = mbox->rx_start + msg->next_msgoff;
	}

	otx2_mbox_msg_send(mbox, 0);
}

static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
{
	struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
	struct mbox *mbox;

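	/* Clear the IRQ */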
	otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));

	mbox = &pf->mbox;
	otx2_queue_work(mbox, pf->mbox_wq, 0, 1, 1, TYPE_PFAF);

	return IRQ_HANDLED;
}

static void otx2_disable_mbox_intr(struct otx2_nic *pf)
{
	int vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX);

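	/* Disable AF => PF mailbox IRQ */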
	otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0));
	free_irq(vector, pf);
}

static int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af)
{
	struct otx2_hw *hw = &pf->hw;
	struct msg_req *req;
	char *irq_name;
	int err;

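	/* Register mailbox interrupt handler */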
	irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE];
	snprintf(irq_name, NAME_SIZE, "RVUPFAF Mbox");
	err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX),
			  otx2_pfaf_mbox_intr_handler, 0, irq_name, pf);
	if (err) {
		dev_err(pf->dev,
			"RVUPF: IRQ registration failed for PFAF mbox irq\n");
		return err;
	}

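	/* Enable mailbox interrupt for msgs coming from AF.
	 * First clear to avoid spurious interrupts, if any.
	 */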
	otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
	otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0));

	if (!probe_af)
		return 0;

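	/* Check mailbox communication with AF */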
	req = otx2_mbox_alloc_msg_ready(&pf->mbox);
	if (!req) {
		otx2_disable_mbox_intr(pf);
		return -ENOMEM;
	}
	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err) {
		dev_warn(pf->dev,
			 "AF not responding to mailbox, deferring probe\n");
		otx2_disable_mbox_intr(pf);
		return -EPROBE_DEFER;
	}

	return 0;
}

static void otx2_pfaf_mbox_destroy(struct otx2_nic *pf)
{
	struct mbox *mbox = &pf->mbox;

	if (pf->mbox_wq) {
		flush_workqueue(pf->mbox_wq);
		destroy_workqueue(pf->mbox_wq);
		pf->mbox_wq = NULL;
	}

	if (mbox->mbox.hwbase)
		iounmap((void __iomem *)mbox->mbox.hwbase);

	otx2_mbox_destroy(&mbox->mbox);
	otx2_mbox_destroy(&mbox->mbox_up);
}

static int otx2_pfaf_mbox_init(struct otx2_nic *pf)
{
	struct mbox *mbox = &pf->mbox;
	void __iomem *hwbase;
	int err;

	mbox->pfvf = pf;
	pf->mbox_wq = alloc_workqueue("otx2_pfaf_mailbox",
				      WQ_UNBOUND | WQ_HIGHPRI |
				      WQ_MEM_RECLAIM, 1);
	if (!pf->mbox_wq)
		return -ENOMEM;

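	/* Mailbox is a reserved memory (in RAM) region shared between
	 * admin function (i.e AF) and this PF, shouldn't be mapped as
	 * device memory to allow unaligned accesses.
	 */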
	hwbase = ioremap_wc(pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM),
			    pci_resource_len(pf->pdev, PCI_MBOX_BAR_NUM));
	if (!hwbase) {
		dev_err(pf->dev, "Unable to map PFAF mailbox region\n");
		err = -ENOMEM;
		goto exit;
	}

	err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base,
			     MBOX_DIR_PFAF, 1);
	if (err)
		goto exit;

	err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base,
			     MBOX_DIR_PFAF_UP, 1);
	if (err)
		goto exit;

	err = otx2_mbox_bbuf_init(mbox, pf->pdev);
	if (err)
		goto exit;

	INIT_WORK(&mbox->mbox_wrk, otx2_pfaf_mbox_handler);
	INIT_WORK(&mbox->mbox_up_wrk, otx2_pfaf_mbox_up_handler);
	otx2_mbox_lock_init(&pf->mbox);

	return 0;
exit:
	otx2_pfaf_mbox_destroy(pf);
	return err;
}

static int otx2_cgx_config_linkevents(struct otx2_nic *pf, bool enable)
{
	struct msg_req *msg;
	int err;

	otx2_mbox_lock(&pf->mbox);
	if (enable)
		msg = otx2_mbox_alloc_msg_cgx_start_linkevents(&pf->mbox);
	else
		msg = otx2_mbox_alloc_msg_cgx_stop_linkevents(&pf->mbox);

	if (!msg) {
		otx2_mbox_unlock(&pf->mbox);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&pf->mbox);
	otx2_mbox_unlock(&pf->mbox);
	return err;
}

static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
{
	struct msg_req *msg;
	int err;

	otx2_mbox_lock(&pf->mbox);
	if (enable)
		msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox);
	else
		msg = otx2_mbox_alloc_msg_cgx_intlbk_disable(&pf->mbox);

	if (!msg) {
		otx2_mbox_unlock(&pf->mbox);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&pf->mbox);
	otx2_mbox_unlock(&pf->mbox);
	return err;
}

int otx2_set_real_num_queues(struct net_device *netdev,
			     int tx_queues, int rx_queues)
{
	int err;

	err = netif_set_real_num_tx_queues(netdev, tx_queues);
	if (err) {
		netdev_err(netdev,
			   "Failed to set no of Tx queues: %d\n", tx_queues);
		return err;
	}

	err = netif_set_real_num_rx_queues(netdev, rx_queues);
	if (err)
		netdev_err(netdev,
			   "Failed to set no of Rx queues: %d\n", rx_queues);
	return err;
}

static irqreturn_t otx2_q_intr_handler(int irq, void *data)
{
	struct otx2_nic *pf = data;
	u64 val, *ptr;
	u64 qidx = 0;

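	/* CQ */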
	for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) {
		ptr = otx2_get_regaddr(pf, NIX_LF_CQ_OP_INT);
		val = otx2_atomic64_add((qidx << 44), ptr);

		otx2_write64(pf, NIX_LF_CQ_OP_INT, (qidx << 44) |
			     (val & NIX_CQERRINT_BITS));
		if (!(val & (NIX_CQERRINT_BITS | BIT_ULL(42))))
			continue;

		if (val & BIT_ULL(42)) {
			netdev_err(pf->netdev, "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
				   qidx, otx2_read64(pf, NIX_LF_ERR_INT));
		} else {
			if (val & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
				netdev_err(pf->netdev, "CQ%lld: Doorbell error",
					   qidx);
			if (val & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
				netdev_err(pf->netdev, "CQ%lld: Memory fault on CQE write to LLC/DRAM",
					   qidx);
		}

		schedule_work(&pf->reset_task);
	}

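	/* SQ */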
	for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
		ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT);
		val = otx2_atomic64_add((qidx << 44), ptr);
		otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) |
			     (val & NIX_SQINT_BITS));

		if (!(val & (NIX_SQINT_BITS | BIT_ULL(42))))
			continue;

		if (val & BIT_ULL(42)) {
			netdev_err(pf->netdev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
				   qidx, otx2_read64(pf, NIX_LF_ERR_INT));
		} else {
			if (val & BIT_ULL(NIX_SQINT_LMT_ERR)) {
				netdev_err(pf->netdev, "SQ%lld: LMT store error NIX_LF_SQ_OP_ERR_DBG:0x%llx",
					   qidx,
					   otx2_read64(pf,
						       NIX_LF_SQ_OP_ERR_DBG));
				otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG,
					     BIT_ULL(44));
			}
			if (val & BIT_ULL(NIX_SQINT_MNQ_ERR)) {
				netdev_err(pf->netdev, "SQ%lld: Meta-descriptor enqueue error NIX_LF_MNQ_ERR_DBG:0x%llx\n",
					   qidx,
					   otx2_read64(pf, NIX_LF_MNQ_ERR_DBG));
				otx2_write64(pf, NIX_LF_MNQ_ERR_DBG,
					     BIT_ULL(44));
			}
			if (val & BIT_ULL(NIX_SQINT_SEND_ERR)) {
				netdev_err(pf->netdev, "SQ%lld: Send error, NIX_LF_SEND_ERR_DBG 0x%llx",
					   qidx,
					   otx2_read64(pf,
						       NIX_LF_SEND_ERR_DBG));
				otx2_write64(pf, NIX_LF_SEND_ERR_DBG,
					     BIT_ULL(44));
			}
			if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL))
				netdev_err(pf->netdev, "SQ%lld: SQB allocation failed",
					   qidx);
		}

		schedule_work(&pf->reset_task);
	}

	return IRQ_HANDLED;
}

static irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq)
{
	struct otx2_cq_poll *cq_poll = (struct otx2_cq_poll *)cq_irq;
	struct otx2_nic *pf = (struct otx2_nic *)cq_poll->dev;
	int qidx = cq_poll->cint_idx;

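	/* Disable interrupts.
	 *
	 * Completion interrupts are re-enabled once NAPI poll is done.
	 */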
	otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));

	/* Schedule NAPI */
	napi_schedule_irqoff(&cq_poll->napi);

	return IRQ_HANDLED;
}

static void otx2_disable_napi(struct otx2_nic *pf)
{
	struct otx2_qset *qset = &pf->qset;
	struct otx2_cq_poll *cq_poll;
	int qidx;

	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
		cq_poll = &qset->napi[qidx];
		napi_disable(&cq_poll->napi);
		netif_napi_del(&cq_poll->napi);
	}
}

static void otx2_free_cq_res(struct otx2_nic *pf)
{
	struct otx2_qset *qset = &pf->qset;
	struct otx2_cq_queue *cq;
	int qidx;

	/* Disable CQs */
	otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_CQ, false);
	for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
		cq = &qset->cq[qidx];
		qmem_free(pf->dev, cq->cqe);
	}
}

static void otx2_free_sq_res(struct otx2_nic *pf)
{
	struct otx2_qset *qset = &pf->qset;
	struct otx2_snd_queue *sq;
	int qidx;

	/* Disable SQs */
	otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false);
	/* Free SQB pointers */
	otx2_sq_free_sqbs(pf);
	for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
		sq = &qset->sq[qidx];
		qmem_free(pf->dev, sq->sqe);
		qmem_free(pf->dev, sq->tso_hdrs);
		kfree(sq->sg);
		kfree(sq->sqb_ptrs);
	}
}

static int otx2_init_hw_resources(struct otx2_nic *pf)
{
	struct mbox *mbox = &pf->mbox;
	struct otx2_hw *hw = &pf->hw;
	struct msg_req *req;
	int err = 0, lvl;

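	/* Set required NPA LF's pool counts.
	 * Auras and Pools are used in a 1:1 mapping,
	 * so, aura count = pool count.
	 */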
	hw->rqpool_cnt = hw->rx_queues;
	hw->sqpool_cnt = hw->tx_queues;
	hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;

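	/* Get the size of receive buffers to allocate */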
	pf->rbsize = RCV_FRAG_LEN(pf->netdev->mtu + OTX2_ETH_HLEN);

	otx2_mbox_lock(mbox);
	/* NPA init */
	err = otx2_config_npa(pf);
	if (err)
		goto exit;

	/* NIX init */
	err = otx2_config_nix(pf);
	if (err)
		goto err_free_npa_lf;

	/* Init Auras and pools used by NIX RQ, for free buffer ptrs */
	err = otx2_rq_aura_pool_init(pf);
	if (err) {
		otx2_mbox_unlock(mbox);
		goto err_free_nix_lf;
	}

	/* Init Auras and pools used by NIX SQ, for queueing SQEs */
	err = otx2_sq_aura_pool_init(pf);
	if (err) {
		otx2_mbox_unlock(mbox);
		goto err_free_rq_ptrs;
	}

	err = otx2_txsch_alloc(pf);
	if (err) {
		otx2_mbox_unlock(mbox);
		goto err_free_sq_ptrs;
	}

	err = otx2_config_nix_queues(pf);
	if (err) {
		otx2_mbox_unlock(mbox);
		goto err_free_txsch;
	}
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		err = otx2_txschq_config(pf, lvl);
		if (err) {
			otx2_mbox_unlock(mbox);
			goto err_free_nix_queues;
		}
	}
	otx2_mbox_unlock(mbox);
	return err;

err_free_nix_queues:
	otx2_free_sq_res(pf);
	otx2_free_cq_res(pf);
	otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
err_free_txsch:
	if (otx2_txschq_stop(pf))
		dev_err(pf->dev, "%s failed to stop TX schedulers\n", __func__);
err_free_sq_ptrs:
	otx2_sq_free_sqbs(pf);
err_free_rq_ptrs:
	otx2_free_aura_ptr(pf, AURA_NIX_RQ);
	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
	otx2_aura_pool_free(pf);
err_free_nix_lf:
	otx2_mbox_lock(mbox);
	req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
	if (req) {
		if (otx2_sync_mbox_msg(mbox))
			dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
	}
err_free_npa_lf:
	/* Reset NPA LF */
	req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
	if (req) {
		if (otx2_sync_mbox_msg(mbox))
			dev_err(pf->dev, "%s failed to free npalf\n", __func__);
	}
exit:
	otx2_mbox_unlock(mbox);
	return err;
}

static void otx2_free_hw_resources(struct otx2_nic *pf)
{
	struct otx2_qset *qset = &pf->qset;
	struct mbox *mbox = &pf->mbox;
	struct otx2_cq_queue *cq;
	struct msg_req *req;
	int qidx, err;

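	/* Ensure all SQEs are processed */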
	otx2_sqb_flush(pf);

	/* Stop transmission */
	err = otx2_txschq_stop(pf);
	if (err)
		dev_err(pf->dev, "RVUPF: Failed to stop/free TX schedulers\n");

	/* Disable RQs */
	otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);

	/* Dequeue all CQEs */
	for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
		cq = &qset->cq[qidx];
		if (cq->cq_type == CQ_RX)
			otx2_cleanup_rx_cqes(pf, cq);
		else
			otx2_cleanup_tx_cqes(pf, cq);
	}

	otx2_free_sq_res(pf);

	/* Free RQ buffer pointers */
	otx2_free_aura_ptr(pf, AURA_NIX_RQ);

	otx2_free_cq_res(pf);

	otx2_mbox_lock(mbox);
	/* Reset NIX LF */
	req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
	if (req) {
		if (otx2_sync_mbox_msg(mbox))
			dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
	}
	otx2_mbox_unlock(mbox);

	/* Disable NPA Pool and Aura hw context */
	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
	otx2_aura_pool_free(pf);

	otx2_mbox_lock(mbox);
	/* Reset NPA LF */
	req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
	if (req) {
		if (otx2_sync_mbox_msg(mbox))
			dev_err(pf->dev, "%s failed to free npalf\n", __func__);
	}
	otx2_mbox_unlock(mbox);
}

int otx2_open(struct net_device *netdev)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct otx2_cq_poll *cq_poll = NULL;
	struct otx2_qset *qset = &pf->qset;
	int err = 0, qidx, vec;
	char *irq_name;

	netif_carrier_off(netdev);

	pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tx_queues;

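	/* RQs and SQs are mapped to different CQs,
	 * so find out max CQ IRQs (i.e CINTs) needed.
	 */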
	pf->hw.cint_cnt = max(pf->hw.rx_queues, pf->hw.tx_queues);
	qset->napi = kcalloc(pf->hw.cint_cnt, sizeof(*cq_poll), GFP_KERNEL);
	if (!qset->napi)
		return -ENOMEM;

	/* CQ size of RQ */
	qset->rqe_cnt = qset->rqe_cnt ? qset->rqe_cnt : Q_COUNT(Q_SIZE_256);
	/* CQ size of SQ */
	qset->sqe_cnt = qset->sqe_cnt ? qset->sqe_cnt : Q_COUNT(Q_SIZE_4K);

	err = -ENOMEM;
	qset->cq = kcalloc(pf->qset.cq_cnt,
			   sizeof(struct otx2_cq_queue), GFP_KERNEL);
	if (!qset->cq)
		goto err_free_mem;

	qset->sq = kcalloc(pf->hw.tx_queues,
			   sizeof(struct otx2_snd_queue), GFP_KERNEL);
	if (!qset->sq)
		goto err_free_mem;

	qset->rq = kcalloc(pf->hw.rx_queues,
			   sizeof(struct otx2_rcv_queue), GFP_KERNEL);
	if (!qset->rq)
		goto err_free_mem;

	err = otx2_init_hw_resources(pf);
	if (err)
		goto err_free_mem;

	/* Register NAPI handler */
	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
		cq_poll = &qset->napi[qidx];
		cq_poll->cint_idx = qidx;
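		/* RQ0 & SQ0 are mapped to CINT0 and so on..
		 * 'cq_ids[0]' points to RQ's CQ and
		 * 'cq_ids[1]' points to SQ's CQ.
		 */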
		cq_poll->cq_ids[CQ_RX] =
			(qidx < pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ;
		cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ?
			qidx + pf->hw.rx_queues : CINT_INVALID_CQ;
		cq_poll->dev = (void *)pf;
		netif_napi_add(netdev, &cq_poll->napi,
			       otx2_napi_handler, NAPI_POLL_WEIGHT);
		napi_enable(&cq_poll->napi);
	}

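	/* Set maximum frame size allowed in HW */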
	err = otx2_hw_set_mtu(pf, netdev->mtu);
	if (err)
		goto err_disable_napi;

	/* Initialize RSS */
	err = otx2_rss_init(pf);
	if (err)
		goto err_disable_napi;

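	/* Register Queue IRQ handlers */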
	vec = pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START;
	irq_name = &pf->hw.irq_name[vec * NAME_SIZE];

	snprintf(irq_name, NAME_SIZE, "%s-qerr", pf->netdev->name);

	err = request_irq(pci_irq_vector(pf->pdev, vec),
			  otx2_q_intr_handler, 0, irq_name, pf);
	if (err) {
		dev_err(pf->dev,
			"RVUPF%d: IRQ registration failed for QERR\n",
			rvu_get_pf(pf->pcifunc));
		goto err_disable_napi;
	}

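	/* Enable QINT IRQ */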
	otx2_write64(pf, NIX_LF_QINTX_ENA_W1S(0), BIT_ULL(0));

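	/* Register CQ IRQ handlers */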
	vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
		irq_name = &pf->hw.irq_name[vec * NAME_SIZE];

		snprintf(irq_name, NAME_SIZE, "%s-rxtx-%d", pf->netdev->name,
			 qidx);

		err = request_irq(pci_irq_vector(pf->pdev, vec),
				  otx2_cq_intr_handler, 0, irq_name,
				  &qset->napi[qidx]);
		if (err) {
			dev_err(pf->dev,
				"RVUPF%d: IRQ registration failed for CQ%d\n",
				rvu_get_pf(pf->pcifunc), qidx);
			goto err_free_cints;
		}
		vec++;

		otx2_config_irq_coalescing(pf, qidx);

		/* Enable CQ IRQ */
		otx2_write64(pf, NIX_LF_CINTX_INT(qidx), BIT_ULL(0));
		otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0));
	}

	otx2_set_cints_affinity(pf);

	pf->flags &= ~OTX2_FLAG_INTF_DOWN;
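	/* 'intf_down' may be checked on any cpu */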
	smp_wmb();

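	/* We may have already received a link status notification */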
	if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK))
		otx2_handle_link_event(pf);

	err = otx2_rxtx_enable(pf, true);
	if (err)
		goto err_free_cints;

	return 0;

err_free_cints:
	otx2_free_cints(pf, qidx);
	vec = pci_irq_vector(pf->pdev,
			     pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
	otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
	synchronize_irq(vec);
	free_irq(vec, pf);
err_disable_napi:
	otx2_disable_napi(pf);
	otx2_free_hw_resources(pf);
err_free_mem:
	kfree(qset->sq);
	kfree(qset->cq);
	kfree(qset->rq);
	kfree(qset->napi);
	return err;
}

int otx2_stop(struct net_device *netdev)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct otx2_cq_poll *cq_poll = NULL;
	struct otx2_qset *qset = &pf->qset;
	int qidx, vec, wrk;

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	pf->flags |= OTX2_FLAG_INTF_DOWN;
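	/* 'intf_down' may be checked on any cpu */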
	smp_wmb();

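	/* First stop packet Rx/Tx */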
	otx2_rxtx_enable(pf, false);

	/* Cleanup Queue IRQ */
	vec = pci_irq_vector(pf->pdev,
			     pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
	otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
	synchronize_irq(vec);
	free_irq(vec, pf);

	/* Cleanup CQ NAPI and IRQ */
	vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
		/* Disable interrupt */
		otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));

		synchronize_irq(pci_irq_vector(pf->pdev, vec));

		cq_poll = &qset->napi[qidx];
		napi_synchronize(&cq_poll->napi);
		vec++;
	}

	netif_tx_disable(netdev);

	otx2_free_hw_resources(pf);
	otx2_free_cints(pf, pf->hw.cint_cnt);
	otx2_disable_napi(pf);

	for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
		netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));

	for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)
		cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);
	devm_kfree(pf->dev, pf->refill_wrk);

	kfree(qset->sq);
	kfree(qset->cq);
	kfree(qset->rq);
	kfree(qset->napi);

	memset((void *)qset + offsetof(struct otx2_qset, sqe_cnt), 0,
	       sizeof(*qset) - offsetof(struct otx2_qset, sqe_cnt));
	return 0;
}

static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	int qidx = skb_get_queue_mapping(skb);
	struct otx2_snd_queue *sq;
	struct netdev_queue *txq;

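	/* Check for minimum and maximum packet length */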
	if (skb->len <= ETH_HLEN ||
	    (!skb_shinfo(skb)->gso_size && skb->len > pf->max_frs)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	sq = &pf->qset.sq[qidx];
	txq = netdev_get_tx_queue(netdev, qidx);

	if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
		netif_tx_stop_queue(txq);

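		/* Check again, in case SQBs got freed up */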
		smp_mb();
		if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
							> sq->sqe_thresh)
			netif_tx_wake_queue(txq);

		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}

static void otx2_set_rx_mode(struct net_device *netdev)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct nix_rx_mode *req;

	if (!(netdev->flags & IFF_UP))
		return;

	otx2_mbox_lock(&pf->mbox);
	req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox);
	if (!req) {
		otx2_mbox_unlock(&pf->mbox);
		return;
	}

	req->mode = NIX_RX_MODE_UCAST;

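	/* We don't support MAC address filtering yet */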
	if (netdev->flags & IFF_PROMISC)
		req->mode |= NIX_RX_MODE_PROMISC;
	else if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
		req->mode |= NIX_RX_MODE_ALLMULTI;

	otx2_sync_mbox_msg(&pf->mbox);
	otx2_mbox_unlock(&pf->mbox);
}

static int otx2_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	netdev_features_t changed = features ^ netdev->features;
	struct otx2_nic *pf = netdev_priv(netdev);

	if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
		return otx2_cgx_config_loopback(pf,
						features & NETIF_F_LOOPBACK);
	return 0;
}

static void otx2_reset_task(struct work_struct *work)
{
	struct otx2_nic *pf = container_of(work, struct otx2_nic, reset_task);

	if (!netif_running(pf->netdev))
		return;

	otx2_stop(pf->netdev);
	pf->reset_count++;
	otx2_open(pf->netdev);
	netif_trans_update(pf->netdev);
}

static const struct net_device_ops otx2_netdev_ops = {
	.ndo_open		= otx2_open,
	.ndo_stop		= otx2_stop,
	.ndo_start_xmit		= otx2_xmit,
	.ndo_set_mac_address	= otx2_set_mac_address,
	.ndo_change_mtu		= otx2_change_mtu,
	.ndo_set_rx_mode	= otx2_set_rx_mode,
	.ndo_set_features	= otx2_set_features,
	.ndo_tx_timeout		= otx2_tx_timeout,
	.ndo_get_stats64	= otx2_get_stats64,
};

static int otx2_check_pf_usable(struct otx2_nic *nic)
{
	u64 rev;

	rev = otx2_read64(nic, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
	rev = (rev >> 12) & 0xFF;
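	/* Check if AF has setup revision for RVUM block,
	 * otherwise this driver probe should be deferred
	 * until AF driver comes up.
	 */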
	if (!rev) {
		dev_warn(nic->dev,
			 "AF is not initialized, deferring probe\n");
		return -EPROBE_DEFER;
	}
	return 0;
}

static int otx2_realloc_msix_vectors(struct otx2_nic *pf)
{
	struct otx2_hw *hw = &pf->hw;
	int num_vec, err;

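	/* NPA interrupts are not registered, so alloc only
	 * upto NIX vector offset.
	 */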
	num_vec = hw->nix_msixoff;
	num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;

	otx2_disable_mbox_intr(pf);
	pci_free_irq_vectors(hw->pdev);
	err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(pf->dev, "%s: Failed to realloc %d IRQ vectors\n",
			__func__, num_vec);
		return err;
	}

	return otx2_register_mbox_intr(pf, false);
}

static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct otx2_nic *pf;
	struct otx2_hw *hw;
	int err, qcount;
	int num_vec;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		return err;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "DMA mask config failed, abort\n");
		goto err_release_regions;
	}

	pci_set_master(pdev);

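	/* Set number of queues */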
	qcount = min_t(int, num_online_cpus(), OTX2_MAX_CQ_CNT);

	netdev = alloc_etherdev_mqs(sizeof(*pf), qcount, qcount);
	if (!netdev) {
		err = -ENOMEM;
		goto err_release_regions;
	}

	pci_set_drvdata(pdev, netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);
	pf = netdev_priv(netdev);
	pf->netdev = netdev;
	pf->pdev = pdev;
	pf->dev = dev;
	pf->flags |= OTX2_FLAG_INTF_DOWN;

	hw = &pf->hw;
	hw->pdev = pdev;
	hw->rx_queues = qcount;
	hw->tx_queues = qcount;
	hw->max_queues = qcount;

	num_vec = pci_msix_vec_count(pdev);
	hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
					  GFP_KERNEL);
	if (!hw->irq_name) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

	hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
					 sizeof(cpumask_var_t), GFP_KERNEL);
	if (!hw->affinity_mask) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

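	/* Map CSRs */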
	pf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!pf->reg_base) {
		dev_err(dev, "Unable to map physical function CSRs, aborting\n");
		err = -ENOMEM;
		goto err_free_netdev;
	}

	err = otx2_check_pf_usable(pf);
	if (err)
		goto err_free_netdev;

	err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT,
				    RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
			__func__, num_vec);
		goto err_free_netdev;
	}

	/* Init PF <=> AF mailbox stuff */
	err = otx2_pfaf_mbox_init(pf);
	if (err)
		goto err_free_irq_vectors;

	/* Register mailbox interrupt */
	err = otx2_register_mbox_intr(pf, true);
	if (err)
		goto err_mbox_destroy;

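	/* Request AF to attach NPA and NIX LFs to this PF.
	 * NIX and NPA LFs are needed for this PF to function as a NIC.
	 */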
	err = otx2_attach_npa_nix(pf);
	if (err)
		goto err_disable_mbox_intr;

	err = otx2_realloc_msix_vectors(pf);
	if (err)
		goto err_detach_rsrc;

	err = otx2_set_real_num_queues(netdev, hw->tx_queues, hw->rx_queues);
	if (err)
		goto err_detach_rsrc;

	otx2_setup_dev_hw_settings(pf);

	/* Assign default mac address */
	otx2_get_mac_from_af(netdev);

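	/* NPA's pool is a stack to which SW frees buffer pointers via Aura.
	 * HW allocates buffer pointers from the stack and uses them for
	 * DMA'ing ingress packets, and in some scenarios frees them back,
	 * so SW cannot keep a parallel list of the IOVAs handed to HW.
	 * The only way to translate an Rx buffer address is therefore the
	 * IOMMU domain's iova_to_phys() handler, which walks the
	 * translation tables.
	 */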
	pf->iommu_domain = iommu_get_domain_for_dev(dev);

	netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
			       NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
			       NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6);
	netdev->features |= netdev->hw_features;

	netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;

	netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
	netdev->watchdog_timeo = OTX2_TX_TIMEOUT;

	netdev->netdev_ops = &otx2_netdev_ops;

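	/* Supported MTU range */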
	netdev->min_mtu = OTX2_MIN_MTU;
	netdev->max_mtu = OTX2_MAX_MTU;

	INIT_WORK(&pf->reset_task, otx2_reset_task);

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Failed to register netdevice\n");
		goto err_detach_rsrc;
	}

	otx2_set_ethtool_ops(netdev);

	/* Enable link notifications */
	otx2_cgx_config_linkevents(pf, true);

	return 0;

err_detach_rsrc:
	otx2_detach_resources(&pf->mbox);
err_disable_mbox_intr:
	otx2_disable_mbox_intr(pf);
err_mbox_destroy:
	otx2_pfaf_mbox_destroy(pf);
err_free_irq_vectors:
	pci_free_irq_vectors(hw->pdev);
err_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
err_release_regions:
	pci_release_regions(pdev);
	return err;
}

static void otx2_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct otx2_nic *pf;

	if (!netdev)
		return;

	pf = netdev_priv(netdev);

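	/* Disable link notifications */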
	otx2_cgx_config_linkevents(pf, false);

	unregister_netdev(netdev);
	otx2_detach_resources(&pf->mbox);
	otx2_disable_mbox_intr(pf);
	otx2_pfaf_mbox_destroy(pf);
	pci_free_irq_vectors(pf->pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);

	pci_release_regions(pdev);
}

static struct pci_driver otx2_pf_driver = {
	.name = DRV_NAME,
	.id_table = otx2_pf_id_table,
	.probe = otx2_probe,
	.shutdown = otx2_remove,
	.remove = otx2_remove,
};

static int __init otx2_rvupf_init_module(void)
{
	pr_info("%s: %s\n", DRV_NAME, DRV_STRING);

	return pci_register_driver(&otx2_pf_driver);
}

static void __exit otx2_rvupf_cleanup_module(void)
{
	pci_unregister_driver(&otx2_pf_driver);
}

module_init(otx2_rvupf_init_module);
module_exit(otx2_rvupf_cleanup_module);