/* Broadcom NetXtreme-E RoCE driver: main module. */

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <net/dcbnl.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <linux/if_ether.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>

#include "bnxt_ulp.h"
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_rcfw.h"
#include "bnxt_re.h"
#include "ib_verbs.h"
#include <rdma/bnxt_re-abi.h>
#include "bnxt.h"
#include "hw_counters.h"

static char version[] =
		BNXT_RE_DESC "\n";

MODULE_AUTHOR("Eddie Wai <eddie.wai@broadcom.com>");
MODULE_DESCRIPTION(BNXT_RE_DESC " Driver");
MODULE_LICENSE("Dual BSD/GPL");

static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list);

static DEFINE_MUTEX(bnxt_re_dev_lock);
static struct workqueue_struct *bnxt_re_wq;
static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev);

static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
{
	rdev->rcfw.res = NULL;
	rdev->qplib_res.cctx = NULL;
}

static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev;
	struct bnxt *bp;

	en_dev = rdev->en_dev;
	bp = netdev_priv(en_dev->net);

	rdev->chip_ctx.chip_num = bp->chip_num;
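	/* rest members to follow eventually */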

	rdev->qplib_res.cctx = &rdev->chip_ctx;
	rdev->rcfw.res = &rdev->qplib_res;

	return 0;
}

static void bnxt_re_get_sriov_func_type(struct bnxt_re_dev *rdev)
{
	struct bnxt *bp;

	bp = netdev_priv(rdev->en_dev->net);
	if (BNXT_VF(bp))
		rdev->is_virtfn = 1;
}

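/* Compute per-function resource limits from the device capabilities; when
 * SR-IOV is active, a share is reserved for the PF and the remainder is
 * divided among the VFs.
 */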
static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
{
	u32 vf_qps = 0, vf_srqs = 0, vf_cqs = 0, vf_mrws = 0, vf_gids = 0;
	u32 i;
	u32 vf_pct;
	u32 num_vfs;
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;

	rdev->qplib_ctx.qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT,
					  dev_attr->max_qp);

	rdev->qplib_ctx.mrw_count = BNXT_RE_MAX_MRW_COUNT_256K;

	rdev->qplib_ctx.mrw_count = min_t(u32, rdev->qplib_ctx.mrw_count,
					  dev_attr->max_mr);
	rdev->qplib_ctx.srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT,
					   dev_attr->max_srq);
	rdev->qplib_ctx.cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT,
					 dev_attr->max_cq);

	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
		rdev->qplib_ctx.tqm_count[i] =
			rdev->dev_attr.tqm_alloc_reqs[i];

	if (rdev->num_vfs) {
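		/* Reserve BNXT_RE_PCT_RSVD_FOR_PF percent of the resources
		 * for the PF and divide the rest equally among the VFs.
		 */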
		vf_pct = 100 - BNXT_RE_PCT_RSVD_FOR_PF;
		num_vfs = 100 * rdev->num_vfs;
		vf_qps = (rdev->qplib_ctx.qpc_count * vf_pct) / num_vfs;
		vf_srqs = (rdev->qplib_ctx.srqc_count * vf_pct) / num_vfs;
		vf_cqs = (rdev->qplib_ctx.cq_count * vf_pct) / num_vfs;
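
		/* The driver allows many more MRs than other resources. If
		 * the firmware is also generous with MRs, reserve a fixed
		 * amount for the PF and divide the rest among the VFs;
		 * otherwise fall back to the percentage-based split.
		 */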
		if (rdev->qplib_ctx.mrw_count < BNXT_RE_MAX_MRW_COUNT_64K)
			vf_mrws = rdev->qplib_ctx.mrw_count * vf_pct / num_vfs;
		else
			vf_mrws = (rdev->qplib_ctx.mrw_count -
				   BNXT_RE_RESVD_MR_FOR_PF) / rdev->num_vfs;
		vf_gids = BNXT_RE_MAX_GID_PER_VF;
	}
	rdev->qplib_ctx.vf_res.max_mrw_per_vf = vf_mrws;
	rdev->qplib_ctx.vf_res.max_gid_per_vf = vf_gids;
	rdev->qplib_ctx.vf_res.max_qp_per_vf = vf_qps;
	rdev->qplib_ctx.vf_res.max_srq_per_vf = vf_srqs;
	rdev->qplib_ctx.vf_res.max_cq_per_vf = vf_cqs;
}

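/* bnxt_en ULP callbacks */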
static void bnxt_re_stop(void *p)
{
}

static void bnxt_re_start(void *p)
{
}

static void bnxt_re_sriov_config(void *p, int num_vfs)
{
	struct bnxt_re_dev *rdev = p;

	if (!rdev)
		return;

	rdev->num_vfs = num_vfs;
	bnxt_re_set_resource_limits(rdev);
	bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
				      &rdev->qplib_ctx);
}

static void bnxt_re_shutdown(void *p)
{
	struct bnxt_re_dev *rdev = p;

	if (!rdev)
		return;

	bnxt_re_ib_unreg(rdev);
}

static void bnxt_re_stop_irq(void *handle)
{
	struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
	struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
	struct bnxt_qplib_nq *nq;
	int indx;

	for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
		nq = &rdev->nq[indx - 1];
		bnxt_qplib_nq_stop_irq(nq, false);
	}

	bnxt_qplib_rcfw_stop_irq(rcfw, false);
}

static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
{
	struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
	struct bnxt_msix_entry *msix_ent = rdev->msix_entries;
	struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
	struct bnxt_qplib_nq *nq;
	int indx, rc;

	if (!ent) {
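		/* No new MSI-X vector table was provided, so the IRQs
		 * cannot be restarted; leave them stopped and report the
		 * failure.
		 */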
		dev_err(rdev_to_dev(rdev), "Failed to re-start IRQs\n");
		return;
	}

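	/* Vectors may have changed across the restart; refresh the cached
	 * MSI-X table before re-arming the AEQ and NQ interrupts.
	 */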
	for (indx = 0; indx < rdev->num_msix; indx++)
		rdev->msix_entries[indx].vector = ent[indx].vector;

	bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
				  false);
	for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
		nq = &rdev->nq[indx - 1];
		rc = bnxt_qplib_nq_start_irq(nq, indx - 1,
					     msix_ent[indx].vector, false);
		if (rc)
			dev_warn(rdev_to_dev(rdev),
				 "Failed to reinit NQ index %d\n", indx - 1);
	}
}

static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
	.ulp_async_notifier = NULL,
	.ulp_stop = bnxt_re_stop,
	.ulp_start = bnxt_re_start,
	.ulp_sriov_config = bnxt_re_sriov_config,
	.ulp_shutdown = bnxt_re_shutdown,
	.ulp_irq_stop = bnxt_re_stop_irq,
	.ulp_irq_restart = bnxt_re_start_irq
};

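/* RoCE -> Net driver: registration and teardown helpers that bind this
 * RoCE device to its bnxt_en Ethernet function.
 */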
static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev;
	int rc;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	rc = en_dev->en_ops->bnxt_unregister_device(rdev->en_dev,
						    BNXT_ROCE_ULP);
	return rc;
}

static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev;
	int rc = 0;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	rc = en_dev->en_ops->bnxt_register_device(en_dev, BNXT_ROCE_ULP,
						  &bnxt_re_ulp_ops, rdev);
	rdev->qplib_res.pdev = rdev->en_dev->pdev;
	return rc;
}

static int bnxt_re_free_msix(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev;
	int rc;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	rc = en_dev->en_ops->bnxt_free_msix(rdev->en_dev, BNXT_ROCE_ULP);

	return rc;
}

static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
{
	int rc = 0, num_msix_want = BNXT_RE_MAX_MSIX, num_msix_got;
	struct bnxt_en_dev *en_dev;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus());

	num_msix_got = en_dev->en_ops->bnxt_request_msix(en_dev, BNXT_ROCE_ULP,
							 rdev->msix_entries,
							 num_msix_want);
	if (num_msix_got < BNXT_RE_MIN_MSIX) {
		rc = -EINVAL;
		goto done;
	}
	if (num_msix_got != num_msix_want) {
		dev_warn(rdev_to_dev(rdev),
			 "Requested %d MSI-X vectors, got %d\n",
			 num_msix_want, num_msix_got);
	}
	rdev->num_msix = num_msix_got;
done:
	return rc;
}

static void bnxt_re_init_hwrm_hdr(struct bnxt_re_dev *rdev, struct input *hdr,
				  u16 opcd, u16 crid, u16 trid)
{
	hdr->req_type = cpu_to_le16(opcd);
	hdr->cmpl_ring = cpu_to_le16(crid);
	hdr->target_id = cpu_to_le16(trid);
}

static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
				int msg_len, void *resp, int resp_max_len,
				int timeout)
{
	fw_msg->msg = msg;
	fw_msg->msg_len = msg_len;
	fw_msg->resp = resp;
	fw_msg->resp_max_len = resp_max_len;
	fw_msg->timeout = timeout;
}

static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
				 u16 fw_ring_id, int type)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_ring_free_input req = {0};
	struct hwrm_ring_free_output resp;
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));

	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1);
	req.ring_type = type;
	req.ring_id = cpu_to_le16(fw_ring_id);
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc)
		dev_err(rdev_to_dev(rdev),
			"Failed to free HW ring:%d :%#x", req.ring_id, rc);
	return rc;
}

static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr,
				  int pages, int type, u32 ring_mask,
				  u32 map_index, u16 *fw_ring_id)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_ring_alloc_input req = {0};
	struct hwrm_ring_alloc_output resp;
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));
	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1);
	req.enables = 0;
	req.page_tbl_addr = cpu_to_le64(dma_arr[0]);
	if (pages > 1) {
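		/* Page size is in log2 units */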
		req.page_size = BNXT_PAGE_SHIFT;
		req.page_tbl_depth = 1;
	}
	req.fbo = 0;
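	/* Association of ring index with doorbell index and MSIX number */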
	req.logical_id = cpu_to_le16(map_index);
	req.length = cpu_to_le32(ring_mask + 1);
	req.ring_type = type;
	req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (!rc)
		*fw_ring_id = le16_to_cpu(resp.ring_id);

	return rc;
}

static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
				      u32 fw_stats_ctx_id)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_stat_ctx_free_input req = {0};
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));

	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1);
	req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&req,
			    sizeof(req), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc)
		dev_err(rdev_to_dev(rdev),
			"Failed to free HW stats context %#x", rc);

	return rc;
}

static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
				       dma_addr_t dma_map,
				       u32 *fw_stats_ctx_id)
{
	struct hwrm_stat_ctx_alloc_output resp = {0};
	struct hwrm_stat_ctx_alloc_input req = {0};
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	*fw_stats_ctx_id = INVALID_STATS_CTX_ID;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));

	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
	req.update_period_ms = cpu_to_le32(1000);
	req.stats_dma_addr = cpu_to_le64(dma_map);
	req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (!rc)
		*fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id);

	return rc;
}

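/* Device helpers: identify the bnxt_en netdev and take references on it
 * before binding the RoCE device.
 */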
static bool is_bnxt_re_dev(struct net_device *netdev)
{
	struct ethtool_drvinfo drvinfo;

	if (netdev->ethtool_ops && netdev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		netdev->ethtool_ops->get_drvinfo(netdev, &drvinfo);

		if (strcmp(drvinfo.driver, "bnxt_en"))
			return false;
		return true;
	}
	return false;
}

static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev)
{
	struct bnxt_re_dev *rdev;

	rcu_read_lock();
	list_for_each_entry_rcu(rdev, &bnxt_re_dev_list, list) {
		if (rdev->netdev == netdev) {
			rcu_read_unlock();
			return rdev;
		}
	}
	rcu_read_unlock();
	return NULL;
}

static void bnxt_re_dev_unprobe(struct net_device *netdev,
				struct bnxt_en_dev *en_dev)
{
	dev_put(netdev);
	module_put(en_dev->pdev->driver->driver.owner);
}

static struct bnxt_en_dev *bnxt_re_dev_probe(struct net_device *netdev)
{
	struct bnxt *bp = netdev_priv(netdev);
	struct bnxt_en_dev *en_dev;
	struct pci_dev *pdev;

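	/* bnxt_en exposes its ULP probe hook through netdev_priv(); an
	 * absent ulp_probe means the Ethernet driver cannot host a RoCE ULP.
	 */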
	if (!bp->ulp_probe)
		return ERR_PTR(-EINVAL);

	en_dev = bp->ulp_probe(netdev);
	if (IS_ERR(en_dev))
		return en_dev;

	pdev = en_dev->pdev;
	if (!pdev)
		return ERR_PTR(-EINVAL);

	if (!(en_dev->flags & BNXT_EN_FLAG_ROCE_CAP)) {
		dev_info(&pdev->dev,
			 "%s: probe error: RoCE is not supported on this device",
			 ROCE_DRV_MODULE_NAME);
		return ERR_PTR(-ENODEV);
	}

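	/* Pin the bnxt_en module and the netdev for the lifetime of the
	 * RoCE binding; both references are dropped in bnxt_re_dev_unprobe().
	 */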
	if (!try_module_get(pdev->driver->driver.owner))
		return ERR_PTR(-ENODEV);

	dev_hold(netdev);

	return en_dev;
}

static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct bnxt_re_dev *rdev =
		rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);

	return scnprintf(buf, PAGE_SIZE, "0x%x\n", rdev->en_dev->pdev->vendor);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct bnxt_re_dev *rdev =
		rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);

	return scnprintf(buf, PAGE_SIZE, "%s\n", rdev->ibdev.node_desc);
}
static DEVICE_ATTR_RO(hca_type);

static struct attribute *bnxt_re_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	NULL
};

static const struct attribute_group bnxt_re_dev_attr_group = {
	.attrs = bnxt_re_attributes,
};

static void bnxt_re_unregister_ib(struct bnxt_re_dev *rdev)
{
	ib_unregister_device(&rdev->ibdev);
}

static const struct ib_device_ops bnxt_re_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_BNXT_RE,
	.uverbs_abi_ver = BNXT_RE_ABI_VERSION,

	.add_gid = bnxt_re_add_gid,
	.alloc_hw_stats = bnxt_re_ib_alloc_hw_stats,
	.alloc_mr = bnxt_re_alloc_mr,
	.alloc_pd = bnxt_re_alloc_pd,
	.alloc_ucontext = bnxt_re_alloc_ucontext,
	.create_ah = bnxt_re_create_ah,
	.create_cq = bnxt_re_create_cq,
	.create_qp = bnxt_re_create_qp,
	.create_srq = bnxt_re_create_srq,
	.dealloc_pd = bnxt_re_dealloc_pd,
	.dealloc_ucontext = bnxt_re_dealloc_ucontext,
	.del_gid = bnxt_re_del_gid,
	.dereg_mr = bnxt_re_dereg_mr,
	.destroy_ah = bnxt_re_destroy_ah,
	.destroy_cq = bnxt_re_destroy_cq,
	.destroy_qp = bnxt_re_destroy_qp,
	.destroy_srq = bnxt_re_destroy_srq,
	.get_dev_fw_str = bnxt_re_query_fw_str,
	.get_dma_mr = bnxt_re_get_dma_mr,
	.get_hw_stats = bnxt_re_ib_get_hw_stats,
	.get_link_layer = bnxt_re_get_link_layer,
	.get_port_immutable = bnxt_re_get_port_immutable,
	.map_mr_sg = bnxt_re_map_mr_sg,
	.mmap = bnxt_re_mmap,
	.modify_ah = bnxt_re_modify_ah,
	.modify_device = bnxt_re_modify_device,
	.modify_qp = bnxt_re_modify_qp,
	.modify_srq = bnxt_re_modify_srq,
	.poll_cq = bnxt_re_poll_cq,
	.post_recv = bnxt_re_post_recv,
	.post_send = bnxt_re_post_send,
	.post_srq_recv = bnxt_re_post_srq_recv,
	.query_ah = bnxt_re_query_ah,
	.query_device = bnxt_re_query_device,
	.query_pkey = bnxt_re_query_pkey,
	.query_port = bnxt_re_query_port,
	.query_qp = bnxt_re_query_qp,
	.query_srq = bnxt_re_query_srq,
	.reg_user_mr = bnxt_re_reg_user_mr,
	.req_notify_cq = bnxt_re_req_notify_cq,
	INIT_RDMA_OBJ_SIZE(ib_ah, bnxt_re_ah, ib_ah),
	INIT_RDMA_OBJ_SIZE(ib_cq, bnxt_re_cq, ib_cq),
	INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd),
	INIT_RDMA_OBJ_SIZE(ib_srq, bnxt_re_srq, ib_srq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, bnxt_re_ucontext, ib_uctx),
};

static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
{
	struct ib_device *ibdev = &rdev->ibdev;
	int ret;

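	/* ib device init */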
	ibdev->node_type = RDMA_NODE_IB_CA;
	strlcpy(ibdev->node_desc, BNXT_RE_DESC " HCA",
		strlen(BNXT_RE_DESC) + 5);
	ibdev->phys_port_cnt = 1;

	bnxt_qplib_get_guid(rdev->netdev->dev_addr, (u8 *)&ibdev->node_guid);

	ibdev->num_comp_vectors = 1;
	ibdev->dev.parent = &rdev->en_dev->pdev->dev;
	ibdev->local_dma_lkey = BNXT_QPLIB_RSVD_LKEY;

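	/* User space verbs supported by this device */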
	ibdev->uverbs_cmd_mask =
			(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
			(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
			(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
			(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
			(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
			(1ull << IB_USER_VERBS_CMD_REG_MR) |
			(1ull << IB_USER_VERBS_CMD_REREG_MR) |
			(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
			(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
			(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
			(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
			(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
			(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
			(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
			(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
			(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
			(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
			(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
			(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
			(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
			(1ull << IB_USER_VERBS_CMD_CREATE_AH) |
			(1ull << IB_USER_VERBS_CMD_MODIFY_AH) |
			(1ull << IB_USER_VERBS_CMD_QUERY_AH) |
			(1ull << IB_USER_VERBS_CMD_DESTROY_AH);
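	/* POLL_CQ and REQ_NOTIFY_CQ is directly handled in libbnxt_re */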

	rdma_set_device_sysfs_group(ibdev, &bnxt_re_dev_attr_group);
	ib_set_device_ops(ibdev, &bnxt_re_dev_ops);
	ret = ib_device_set_netdev(&rdev->ibdev, rdev->netdev, 1);
	if (ret)
		return ret;

	return ib_register_device(ibdev, "bnxt_re%d");
}

static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev)
{
	dev_put(rdev->netdev);
	rdev->netdev = NULL;

	mutex_lock(&bnxt_re_dev_lock);
	list_del_rcu(&rdev->list);
	mutex_unlock(&bnxt_re_dev_lock);

	synchronize_rcu();

	ib_dealloc_device(&rdev->ibdev);
}

static struct bnxt_re_dev *bnxt_re_dev_add(struct net_device *netdev,
					   struct bnxt_en_dev *en_dev)
{
	struct bnxt_re_dev *rdev;

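	/* Allocate bnxt_re_dev instance here */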
	rdev = ib_alloc_device(bnxt_re_dev, ibdev);
	if (!rdev) {
		dev_err(NULL, "%s: bnxt_re_dev allocation failure!",
			ROCE_DRV_MODULE_NAME);
		return NULL;
	}

	rdev->netdev = netdev;
	dev_hold(rdev->netdev);
	rdev->en_dev = en_dev;
	rdev->id = rdev->en_dev->pdev->devfn;
	INIT_LIST_HEAD(&rdev->qp_list);
	mutex_init(&rdev->qp_lock);
	atomic_set(&rdev->qp_count, 0);
	atomic_set(&rdev->cq_count, 0);
	atomic_set(&rdev->srq_count, 0);
	atomic_set(&rdev->mr_count, 0);
	atomic_set(&rdev->mw_count, 0);
	rdev->cosq[0] = 0xFFFF;
	rdev->cosq[1] = 0xFFFF;

	mutex_lock(&bnxt_re_dev_lock);
	list_add_tail_rcu(&rdev->list, &bnxt_re_dev_list);
	mutex_unlock(&bnxt_re_dev_lock);
	return rdev;
}

static int bnxt_re_handle_unaffi_async_event(struct creq_func_event
					     *unaffi_async)
{
	switch (unaffi_async->event) {
	case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
					 struct bnxt_re_qp *qp)
{
	struct ib_event event;
	unsigned int flags;

	if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		flags = bnxt_re_lock_cqs(qp);
		bnxt_qplib_add_flush_qp(&qp->qplib_qp);
		bnxt_re_unlock_cqs(qp, flags);
	}

	memset(&event, 0, sizeof(event));
	if (qp->qplib_qp.srq) {
		event.device = &qp->rdev->ibdev;
		event.element.qp = &qp->ib_qp;
		event.event = IB_EVENT_QP_LAST_WQE_REACHED;
	}

	if (event.device && qp->ib_qp.event_handler)
		qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);

	return 0;
}

static int bnxt_re_handle_affi_async_event(struct creq_qp_event *affi_async,
					   void *obj)
{
	int rc = 0;
	u8 event;

	if (!obj)
		return rc;

	event = affi_async->event;
	if (event == CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION) {
		struct bnxt_qplib_qp *lib_qp = obj;
		struct bnxt_re_qp *qp = container_of(lib_qp, struct bnxt_re_qp,
						     qplib_qp);
		rc = bnxt_re_handle_qp_async_event(affi_async, qp);
	}
	return rc;
}

static int bnxt_re_aeq_handler(struct bnxt_qplib_rcfw *rcfw,
			       void *aeqe, void *obj)
{
	struct creq_qp_event *affi_async;
	struct creq_func_event *unaffi_async;
	u8 type;
	int rc;

	type = ((struct creq_base *)aeqe)->type;
	if (type == CREQ_BASE_TYPE_FUNC_EVENT) {
		unaffi_async = aeqe;
		rc = bnxt_re_handle_unaffi_async_event(unaffi_async);
	} else {
		affi_async = aeqe;
		rc = bnxt_re_handle_affi_async_event(affi_async, obj);
	}

	return rc;
}

static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq,
				struct bnxt_qplib_srq *handle, u8 event)
{
	struct bnxt_re_srq *srq = container_of(handle, struct bnxt_re_srq,
					       qplib_srq);
	struct ib_event ib_event;
	int rc = 0;

	if (!srq) {
		dev_err(NULL, "%s: SRQ is NULL, SRQN not handled",
			ROCE_DRV_MODULE_NAME);
		rc = -EINVAL;
		goto done;
	}
	ib_event.device = &srq->rdev->ibdev;
	ib_event.element.srq = &srq->ib_srq;
	if (event == NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT)
		ib_event.event = IB_EVENT_SRQ_LIMIT_REACHED;
	else
		ib_event.event = IB_EVENT_SRQ_ERR;

	if (srq->ib_srq.event_handler) {
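		/* Invoke the SRQ event handler registered by the consumer */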
		(*srq->ib_srq.event_handler)(&ib_event,
					     srq->ib_srq.srq_context);
	}
done:
	return rc;
}

static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
			       struct bnxt_qplib_cq *handle)
{
	struct bnxt_re_cq *cq = container_of(handle, struct bnxt_re_cq,
					     qplib_cq);

	if (!cq) {
		dev_err(NULL, "%s: CQ is NULL, CQN not handled",
			ROCE_DRV_MODULE_NAME);
		return -EINVAL;
	}
	if (cq->ib_cq.comp_handler) {
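		/* Invoke the CQ completion handler registered by the consumer */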
		(*cq->ib_cq.comp_handler)(&cq->ib_cq, cq->ib_cq.cq_context);
	}

	return 0;
}

static u32 bnxt_re_get_nqdb_offset(struct bnxt_re_dev *rdev, u16 indx)
{
	return bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx) ?
	       0x10000 : rdev->msix_entries[indx].db_offset;
}

static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
{
	int i;

	for (i = 1; i < rdev->num_msix; i++)
		bnxt_qplib_disable_nq(&rdev->nq[i - 1]);

	if (rdev->qplib_res.rcfw)
		bnxt_qplib_cleanup_res(&rdev->qplib_res);
}

static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
{
	int num_vec_enabled = 0;
	int rc = 0, i;
	u32 db_offt;

	bnxt_qplib_init_res(&rdev->qplib_res);

	for (i = 1; i < rdev->num_msix; i++) {
		db_offt = bnxt_re_get_nqdb_offset(rdev, i);
		rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
					  i - 1, rdev->msix_entries[i].vector,
					  db_offt, &bnxt_re_cqn_handler,
					  &bnxt_re_srqn_handler);
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Failed to enable NQ with rc = 0x%x", rc);
			goto fail;
		}
		num_vec_enabled++;
	}
	return 0;
fail:
	for (i = num_vec_enabled; i >= 0; i--)
		bnxt_qplib_disable_nq(&rdev->nq[i]);
	return rc;
}

static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev)
{
	u8 type;
	int i;

	for (i = 0; i < rdev->num_msix - 1; i++) {
		type = bnxt_qplib_get_ring_type(&rdev->chip_ctx);
		bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
		rdev->nq[i].res = NULL;
		bnxt_qplib_free_nq(&rdev->nq[i]);
	}
}

static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
{
	bnxt_re_free_nq_res(rdev);

	if (rdev->qplib_res.dpi_tbl.max) {
		bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
				       &rdev->qplib_res.dpi_tbl,
				       &rdev->dpi_privileged);
	}
	if (rdev->qplib_res.rcfw) {
		bnxt_qplib_free_res(&rdev->qplib_res);
		rdev->qplib_res.rcfw = NULL;
	}
}

static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
{
	int num_vec_created = 0;
	dma_addr_t *pg_map;
	int rc = 0, i;
	int pages;
	u8 type;

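	/* Configure and allocate resources for qplib */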
	rdev->qplib_res.rcfw = &rdev->rcfw;
	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
				     rdev->is_virtfn);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_res(&rdev->qplib_res, rdev->en_dev->pdev,
				  rdev->netdev, &rdev->dev_attr);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
				  &rdev->dpi_privileged,
				  rdev);
	if (rc)
		goto dealloc_res;

	for (i = 0; i < rdev->num_msix - 1; i++) {
		rdev->nq[i].res = &rdev->qplib_res;
		rdev->nq[i].hwq.max_elements = BNXT_RE_MAX_CQ_COUNT +
			BNXT_RE_MAX_SRQC_COUNT + 2;
		rc = bnxt_qplib_alloc_nq(rdev->en_dev->pdev, &rdev->nq[i]);
		if (rc) {
			dev_err(rdev_to_dev(rdev), "Alloc Failed NQ%d rc:%#x",
				i, rc);
			goto free_nq;
		}
		type = bnxt_qplib_get_ring_type(&rdev->chip_ctx);
		pg_map = rdev->nq[i].hwq.pbl[PBL_LVL_0].pg_map_arr;
		pages = rdev->nq[i].hwq.pbl[rdev->nq[i].hwq.level].pg_count;
		rc = bnxt_re_net_ring_alloc(rdev, pg_map, pages, type,
					    BNXT_QPLIB_NQE_MAX_CNT - 1,
					    rdev->msix_entries[i + 1].ring_idx,
					    &rdev->nq[i].ring_id);
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Failed to allocate NQ fw id with rc = 0x%x",
				rc);
			bnxt_qplib_free_nq(&rdev->nq[i]);
			goto free_nq;
		}
		num_vec_created++;
	}
	return 0;
free_nq:
	for (i = num_vec_created; i >= 0; i--) {
		type = bnxt_qplib_get_ring_type(&rdev->chip_ctx);
		bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
		bnxt_qplib_free_nq(&rdev->nq[i]);
	}
	bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
			       &rdev->qplib_res.dpi_tbl,
			       &rdev->dpi_privileged);
dealloc_res:
	bnxt_qplib_free_res(&rdev->qplib_res);

fail:
	rdev->qplib_res.rcfw = NULL;
	return rc;
}

static void bnxt_re_dispatch_event(struct ib_device *ibdev, struct ib_qp *qp,
				   u8 port_num, enum ib_event_type event)
{
	struct ib_event ib_event;

	ib_event.device = ibdev;
	if (qp) {
		ib_event.element.qp = qp;
		ib_event.event = event;
		if (qp->event_handler)
			qp->event_handler(&ib_event, qp->qp_context);
	} else {
		ib_event.element.port_num = port_num;
		ib_event.event = event;
		ib_dispatch_event(&ib_event);
	}
}

#define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN	0x02
static int bnxt_re_query_hwrm_pri2cos(struct bnxt_re_dev *rdev, u8 dir,
				      u64 *cid_map)
{
	struct hwrm_queue_pri2cos_qcfg_input req = {0};
	struct bnxt *bp = netdev_priv(rdev->netdev);
	struct hwrm_queue_pri2cos_qcfg_output resp;
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct bnxt_fw_msg fw_msg;
	u32 flags = 0;
	u8 *qcfgmap, *tmp_map;
	int rc = 0, i;

	if (!cid_map)
		return -EINVAL;

	memset(&fw_msg, 0, sizeof(fw_msg));
	bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
			      HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
	flags |= (dir & 0x01);
	flags |= HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN;
	req.flags = cpu_to_le32(flags);
	req.port_id = bp->pf.port_id;

	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc)
		return rc;

	if (resp.queue_cfg_info) {
		dev_warn(rdev_to_dev(rdev),
			 "Asymmetric cos queue configuration detected");
		dev_warn(rdev_to_dev(rdev),
			 " on device, QoS may not be fully functional\n");
	}
	qcfgmap = &resp.pri0_cos_queue_id;
	tmp_map = (u8 *)cid_map;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		tmp_map[i] = qcfgmap[i];

	return rc;
}

static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev,
					struct bnxt_re_qp *qp)
{
	return (qp->ib_qp.qp_type == IB_QPT_GSI) || (qp == rdev->qp1_sqp);
}

static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
{
	int mask = IB_QP_STATE;
	struct ib_qp_attr qp_attr;
	struct bnxt_re_qp *qp;

	qp_attr.qp_state = IB_QPS_ERR;
	mutex_lock(&rdev->qp_lock);
	list_for_each_entry(qp, &rdev->qp_list, list) {
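		/* Modify the state of all QPs except QP1/Shadow QP */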
		if (!bnxt_re_is_qp1_or_shadow_qp(rdev, qp)) {
			if (qp->qplib_qp.state !=
			    CMDQ_MODIFY_QP_NEW_STATE_RESET &&
			    qp->qplib_qp.state !=
			    CMDQ_MODIFY_QP_NEW_STATE_ERR) {
				bnxt_re_dispatch_event(&rdev->ibdev, &qp->ib_qp,
						       1, IB_EVENT_QP_FATAL);
				bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, mask,
						  NULL);
			}
		}
	}
	mutex_unlock(&rdev->qp_lock);
}

static int bnxt_re_update_gid(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
	struct bnxt_qplib_gid gid;
	u16 gid_idx, index;
	int rc = 0;

	if (!test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
		return 0;

	if (!sgid_tbl) {
		dev_err(rdev_to_dev(rdev), "QPLIB: SGID table not allocated");
		return -EINVAL;
	}

	for (index = 0; index < sgid_tbl->active; index++) {
		gid_idx = sgid_tbl->hw_id[index];

		if (!memcmp(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
			    sizeof(bnxt_qplib_gid_zero)))
			continue;
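
		/* The VLAN enable setting needs updating only for non-VLAN
		 * GIDs; for VLAN GIDs it is already set at add-GID time.
		 */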
		if (sgid_tbl->vlan[index])
			continue;

		memcpy(&gid, &sgid_tbl->tbl[index], sizeof(gid));

		rc = bnxt_qplib_update_sgid(sgid_tbl, &gid, gid_idx,
					    rdev->qplib_res.netdev->dev_addr);
	}

	return rc;
}

static u32 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev)
{
	u32 prio_map = 0, tmp_map = 0;
	struct net_device *netdev;
	struct dcb_app app;

	netdev = rdev->netdev;

	memset(&app, 0, sizeof(app));
	app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
	app.protocol = ETH_P_IBOE;
	tmp_map = dcb_ieee_getapp_mask(netdev, &app);
	prio_map = tmp_map;

	app.selector = IEEE_8021QAZ_APP_SEL_DGRAM;
	app.protocol = ROCE_V2_UDP_DPORT;
	tmp_map = dcb_ieee_getapp_mask(netdev, &app);
	prio_map |= tmp_map;

	return prio_map;
}

static void bnxt_re_parse_cid_map(u8 prio_map, u8 *cid_map, u16 *cosq)
{
	u16 prio;
	u8 id;

	for (prio = 0, id = 0; prio < 8; prio++) {
		if (prio_map & (1 << prio)) {
			cosq[id] = cid_map[prio];
			id++;
			if (id == 2)
				break;
		}
	}
}

static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
{
	u8 prio_map = 0;
	u64 cid_map;
	int rc;

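	/* Get priority for roce */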
	prio_map = bnxt_re_get_priority_mask(rdev);

	if (prio_map == rdev->cur_prio_map)
		return 0;
	rdev->cur_prio_map = prio_map;

	rc = bnxt_re_query_hwrm_pri2cos(rdev, 0, &cid_map);
	if (rc) {
		dev_warn(rdev_to_dev(rdev), "no cos for p_mask %x\n", prio_map);
		return rc;
	}

	bnxt_re_parse_cid_map(prio_map, (u8 *)&cid_map, rdev->cosq);

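	/* Program the traffic-class to CoS queue mapping in firmware */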
	rc = bnxt_qplib_map_tc2cos(&rdev->qplib_res, rdev->cosq);
	if (rc) {
		dev_warn(rdev_to_dev(rdev), "no tc for cos{%x, %x}\n",
			 rdev->cosq[0], rdev->cosq[1]);
		return rc;
	}

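	/* If the priority map toggled between zero and non-zero, update the
	 * source GIDs so that traffic reflects the new priority setting.
	 */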
	if ((prio_map == 0 && rdev->qplib_res.prio) ||
	    (prio_map != 0 && !rdev->qplib_res.prio)) {
		rdev->qplib_res.prio = prio_map ? true : false;

		bnxt_re_update_gid(rdev);
	}

	return 0;
}

static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_ver_get_output resp = {0};
	struct hwrm_ver_get_input req = {0};
	struct bnxt_fw_msg fw_msg;
	int rc = 0;

	memset(&fw_msg, 0, sizeof(fw_msg));
	bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
			      HWRM_VER_GET, -1, -1);
	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc) {
		dev_err(rdev_to_dev(rdev),
			"Failed to query HW version, rc = 0x%x", rc);
		return;
	}
	rdev->qplib_ctx.hwrm_intf_ver =
		(u64)resp.hwrm_intf_major << 48 |
		(u64)resp.hwrm_intf_minor << 32 |
		(u64)resp.hwrm_intf_build << 16 |
		resp.hwrm_intf_patch;
}

static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev)
{
	u8 type;
	int rc;

	if (test_and_clear_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) {
		bnxt_re_unregister_ib(rdev);
	}
	if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
		cancel_delayed_work_sync(&rdev->worker);

	if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED,
			       &rdev->flags))
		bnxt_re_cleanup_res(rdev);
	if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags))
		bnxt_re_free_res(rdev);

	if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) {
		rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw);
		if (rc)
			dev_warn(rdev_to_dev(rdev),
				 "Failed to deinitialize RCFW: %#x", rc);
		bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
		bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx);
		bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
		type = bnxt_qplib_get_ring_type(&rdev->chip_ctx);
		bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, type);
		bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
	}
	if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) {
		rc = bnxt_re_free_msix(rdev);
		if (rc)
			dev_warn(rdev_to_dev(rdev),
				 "Failed to free MSI-X vectors: %#x", rc);
	}

	bnxt_re_destroy_chip_ctx(rdev);
	if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) {
		rc = bnxt_re_unregister_netdev(rdev);
		if (rc)
			dev_warn(rdev_to_dev(rdev),
				 "Failed to unregister with netdev: %#x", rc);
	}
}

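/* Worker to periodically re-evaluate the QoS configuration */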
static void bnxt_re_worker(struct work_struct *work)
{
	struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
						worker.work);

	bnxt_re_setup_qos(rdev);
	schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
}

static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
{
	dma_addr_t *pg_map;
	u32 db_offt, ridx;
	int pages, vid;
	bool locked;
	u8 type;
	int rc;

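	/* Hold rtnl to serialize with netdev events while setting up */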
	rtnl_lock();
	locked = true;

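	/* Register this RoCE device instance with bnxt_en */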
	rc = bnxt_re_register_netdev(rdev);
	if (rc) {
		rtnl_unlock();
		pr_err("Failed to register with netdev: %#x\n", rc);
		return -EINVAL;
	}
	set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);

	rc = bnxt_re_setup_chip_ctx(rdev);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to get chip context\n");
		return -EINVAL;
	}

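	/* Check whether this function is a VF or the PF */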
	bnxt_re_get_sriov_func_type(rdev);

	rc = bnxt_re_request_msix(rdev);
	if (rc) {
		pr_err("Failed to get MSI-X vectors: %#x\n", rc);
		rc = -EINVAL;
		goto fail;
	}
	set_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags);

	bnxt_re_query_hwrm_intf_version(rdev);

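	/* Establish RCFW Communication Channel to initialize the context
	 * memory for the function and all child VFs
	 */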
	rc = bnxt_qplib_alloc_rcfw_channel(rdev->en_dev->pdev, &rdev->rcfw,
					   &rdev->qplib_ctx,
					   BNXT_RE_MAX_QPC_COUNT);
	if (rc) {
		pr_err("Failed to allocate RCFW Channel: %#x\n", rc);
		goto fail;
	}
	type = bnxt_qplib_get_ring_type(&rdev->chip_ctx);
	pg_map = rdev->rcfw.creq.pbl[PBL_LVL_0].pg_map_arr;
	pages = rdev->rcfw.creq.pbl[rdev->rcfw.creq.level].pg_count;
	ridx = rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
	rc = bnxt_re_net_ring_alloc(rdev, pg_map, pages, type,
				    BNXT_QPLIB_CREQE_MAX_CNT - 1,
				    ridx, &rdev->rcfw.creq_ring_id);
	if (rc) {
		pr_err("Failed to allocate CREQ: %#x\n", rc);
		goto free_rcfw;
	}
	db_offt = bnxt_re_get_nqdb_offset(rdev, BNXT_RE_AEQ_IDX);
	vid = rdev->msix_entries[BNXT_RE_AEQ_IDX].vector;
	rc = bnxt_qplib_enable_rcfw_channel(rdev->en_dev->pdev, &rdev->rcfw,
					    vid, db_offt, rdev->is_virtfn,
					    &bnxt_re_aeq_handler);
	if (rc) {
		pr_err("Failed to enable RCFW channel: %#x\n", rc);
		goto free_ring;
	}

	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
				     rdev->is_virtfn);
	if (rc)
		goto disable_rcfw;
	if (!rdev->is_virtfn)
		bnxt_re_set_resource_limits(rdev);

	rc = bnxt_qplib_alloc_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx, 0,
				  bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx));
	if (rc) {
		pr_err("Failed to allocate QPLIB context: %#x\n", rc);
		goto disable_rcfw;
	}
	rc = bnxt_re_net_stats_ctx_alloc(rdev,
					 rdev->qplib_ctx.stats.dma_map,
					 &rdev->qplib_ctx.stats.fw_id);
	if (rc) {
		pr_err("Failed to allocate stats context: %#x\n", rc);
		goto free_ctx;
	}

	rc = bnxt_qplib_init_rcfw(&rdev->rcfw, &rdev->qplib_ctx,
				  rdev->is_virtfn);
	if (rc) {
		pr_err("Failed to initialize RCFW: %#x\n", rc);
		goto free_sctx;
	}
	set_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags);

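	/* Resources based on the 'new' device caps */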
	rc = bnxt_re_alloc_res(rdev);
	if (rc) {
		pr_err("Failed to allocate resources: %#x\n", rc);
		goto fail;
	}
	set_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags);
	rc = bnxt_re_init_res(rdev);
	if (rc) {
		pr_err("Failed to initialize resources: %#x\n", rc);
		goto fail;
	}

	set_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED, &rdev->flags);

	if (!rdev->is_virtfn) {
		rc = bnxt_re_setup_qos(rdev);
		if (rc)
			pr_info("RoCE priority not yet configured\n");

		INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
		set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags);
		schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
	}

	rtnl_unlock();
	locked = false;

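	/* Register ib dev */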
	rc = bnxt_re_register_ib(rdev);
	if (rc) {
		pr_err("Failed to register with IB: %#x\n", rc);
		goto fail;
	}
	set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
	dev_info(rdev_to_dev(rdev), "Device registered successfully");
	ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
			 &rdev->active_width);
	set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
	bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE);

	return 0;
free_sctx:
	bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
free_ctx:
	bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx);
disable_rcfw:
	bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
free_ring:
	type = bnxt_qplib_get_ring_type(&rdev->chip_ctx);
	bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, type);
free_rcfw:
	bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
fail:
	if (!locked)
		rtnl_lock();
	bnxt_re_ib_unreg(rdev);
	rtnl_unlock();

	return rc;
}

static void bnxt_re_dev_unreg(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct net_device *netdev = rdev->netdev;

	bnxt_re_dev_remove(rdev);

	if (netdev)
		bnxt_re_dev_unprobe(netdev, en_dev);
}

static int bnxt_re_dev_reg(struct bnxt_re_dev **rdev, struct net_device *netdev)
{
	struct bnxt_en_dev *en_dev;
	int rc = 0;

	if (!is_bnxt_re_dev(netdev))
		return -ENODEV;

	en_dev = bnxt_re_dev_probe(netdev);
	if (IS_ERR(en_dev)) {
		if (en_dev != ERR_PTR(-ENODEV))
			pr_err("%s: Failed to probe\n", ROCE_DRV_MODULE_NAME);
		rc = PTR_ERR(en_dev);
		goto exit;
	}
	*rdev = bnxt_re_dev_add(netdev, en_dev);
	if (!*rdev) {
		rc = -ENOMEM;
		bnxt_re_dev_unprobe(netdev, en_dev);
		goto exit;
	}
exit:
	return rc;
}

static void bnxt_re_remove_one(struct bnxt_re_dev *rdev)
{
	pci_dev_put(rdev->en_dev->pdev);
}

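/* Handle netdev events deferred from the notifier to process context */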
static void bnxt_re_task(struct work_struct *work)
{
	struct bnxt_re_work *re_work;
	struct bnxt_re_dev *rdev;
	int rc = 0;

	re_work = container_of(work, struct bnxt_re_work, work);
	rdev = re_work->rdev;

	if (re_work->event != NETDEV_REGISTER &&
	    !test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
		return;

	switch (re_work->event) {
	case NETDEV_REGISTER:
		rc = bnxt_re_ib_reg(rdev);
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Failed to register with IB: %#x", rc);
			bnxt_re_remove_one(rdev);
			bnxt_re_dev_unreg(rdev);
			goto exit;
		}
		break;
	case NETDEV_UP:
		bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
				       IB_EVENT_PORT_ACTIVE);
		break;
	case NETDEV_DOWN:
		bnxt_re_dev_stop(rdev);
		break;
	case NETDEV_CHANGE:
		if (!netif_carrier_ok(rdev->netdev))
			bnxt_re_dev_stop(rdev);
		else if (netif_carrier_ok(rdev->netdev))
			bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
					       IB_EVENT_PORT_ACTIVE);
		ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
				 &rdev->active_width);
		break;
	default:
		break;
	}
	smp_mb__before_atomic();
	atomic_dec(&rdev->sched_count);
exit:
	kfree(re_work);
}

static void bnxt_re_init_one(struct bnxt_re_dev *rdev)
{
	pci_dev_get(rdev->en_dev->pdev);
}

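/*
 * "Notifier chain callback can be invoked for the same chain from
 * different CPUs at the same time".
 *
 * For cases when the netdev is already present, our call to the
 * register_netdevice_notifier() will actually get the rtnl_lock()
 * before sending NETDEV_REGISTER and (if up) NETDEV_UP
 * events.
 *
 * But for cases when the netdev is not already present, the notifier
 * chain is subjected to be invoked from different CPUs simultaneously.
 *
 * This is protected by the netdev_mutex.
 */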
static int bnxt_re_netdev_event(struct notifier_block *notifier,
				unsigned long event, void *ptr)
{
	struct net_device *real_dev, *netdev = netdev_notifier_info_to_dev(ptr);
	struct bnxt_re_work *re_work;
	struct bnxt_re_dev *rdev;
	int rc = 0;
	bool sch_work = false;

	real_dev = rdma_vlan_dev_real_dev(netdev);
	if (!real_dev)
		real_dev = netdev;

	rdev = bnxt_re_from_netdev(real_dev);
	if (!rdev && event != NETDEV_REGISTER)
		goto exit;
	if (real_dev != netdev)
		goto exit;

	switch (event) {
	case NETDEV_REGISTER:
		if (rdev)
			break;
		rc = bnxt_re_dev_reg(&rdev, real_dev);
		if (rc == -ENODEV)
			break;
		if (rc) {
			pr_err("Failed to register with the device %s: %#x\n",
			       real_dev->name, rc);
			break;
		}
		bnxt_re_init_one(rdev);
		sch_work = true;
		break;

	case NETDEV_UNREGISTER:
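		/* netdev notifier will call NETDEV_UNREGISTER again later since
		 * we are still holding the reference to the netdev
		 */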
		if (atomic_read(&rdev->sched_count) > 0)
			goto exit;
		bnxt_re_ib_unreg(rdev);
		bnxt_re_remove_one(rdev);
		bnxt_re_dev_unreg(rdev);
		break;

	default:
		sch_work = true;
		break;
	}
	if (sch_work) {
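		/* Allocate for the deferred task */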
		re_work = kzalloc(sizeof(*re_work), GFP_ATOMIC);
		if (re_work) {
			re_work->rdev = rdev;
			re_work->event = event;
			re_work->vlan_dev = (real_dev == netdev ?
					     NULL : netdev);
			INIT_WORK(&re_work->work, bnxt_re_task);
			atomic_inc(&rdev->sched_count);
			queue_work(bnxt_re_wq, &re_work->work);
		}
	}

exit:
	return NOTIFY_DONE;
}

static struct notifier_block bnxt_re_netdev_notifier = {
	.notifier_call = bnxt_re_netdev_event
};

static int __init bnxt_re_mod_init(void)
{
	int rc = 0;

	pr_info("%s: %s", ROCE_DRV_MODULE_NAME, version);

	bnxt_re_wq = create_singlethread_workqueue("bnxt_re");
	if (!bnxt_re_wq)
		return -ENOMEM;

	INIT_LIST_HEAD(&bnxt_re_dev_list);

	rc = register_netdevice_notifier(&bnxt_re_netdev_notifier);
	if (rc) {
		pr_err("%s: Cannot register to netdevice_notifier",
		       ROCE_DRV_MODULE_NAME);
		goto err_netdev;
	}
	return 0;

err_netdev:
	destroy_workqueue(bnxt_re_wq);

	return rc;
}

static void __exit bnxt_re_mod_exit(void)
{
	struct bnxt_re_dev *rdev, *next;
	LIST_HEAD(to_be_deleted);

	mutex_lock(&bnxt_re_dev_lock);

	if (!list_empty(&bnxt_re_dev_list))
		list_splice_init(&bnxt_re_dev_list, &to_be_deleted);
	mutex_unlock(&bnxt_re_dev_lock);

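	/*
	 * Cleanup the devices in reverse order so that the VF device
	 * cleanup is done before PF cleanup
	 */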
	list_for_each_entry_safe_reverse(rdev, next, &to_be_deleted, list) {
		dev_info(rdev_to_dev(rdev), "Unregistering Device");
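		/*
		 * Flush out any scheduled tasks before destroying the
		 * resources
		 */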
		flush_workqueue(bnxt_re_wq);
		bnxt_re_dev_stop(rdev);

		rtnl_lock();
		bnxt_re_ib_unreg(rdev);
		rtnl_unlock();
		bnxt_re_remove_one(rdev);
		bnxt_re_dev_unreg(rdev);
	}
	unregister_netdevice_notifier(&bnxt_re_netdev_notifier);
	if (bnxt_re_wq)
		destroy_workqueue(bnxt_re_wq);
}

module_init(bnxt_re_mod_init);
module_exit(bnxt_re_mod_exit);