#include <linux/module.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_mad.h>
#include <linux/netdevice.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <net/addrconf.h>

#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_iw_cm.h"

MODULE_DESCRIPTION("QLogic 40G/100G RoCE Driver");
MODULE_AUTHOR("QLogic Corporation");
MODULE_LICENSE("Dual BSD/GPL");

#define QEDR_WQ_MULTIPLIER_DFT	(3)

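/* Build an ib_event for the given port and dispatch it to the IB core. */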
static void qedr_ib_dispatch_event(struct qedr_dev *dev, u32 port_num,
				   enum ib_event_type type)
{
	struct ib_event ibev;

	ibev.device = &dev->ibdev;
	ibev.element.port_num = port_num;
	ibev.event = type;

	ib_dispatch_event(&ibev);
}

static enum rdma_link_layer qedr_link_layer(struct ib_device *device,
					    u32 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

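/* Report the firmware version as four dot-separated byte fields. */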
static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str)
{
	struct qedr_dev *qedr = get_qedr_dev(ibdev);
	u32 fw_ver = (u32)qedr->attr.fw_ver;

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
		 (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF,
		 (fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
}

static int qedr_roce_port_immutable(struct ib_device *ibdev, u32 port_num,
				    struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = qedr_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
				    RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static int qedr_iw_port_immutable(struct ib_device *ibdev, u32 port_num,
				  struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = qedr_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->gid_tbl_len = 1;
	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
	immutable->max_mad_size = 0;

	return 0;
}

static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct qedr_dev *dev =
		rdma_device_to_drv_device(device, struct qedr_dev, ibdev);

	return sysfs_emit(buf, "0x%x\n", dev->attr.hw_ver);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct qedr_dev *dev =
		rdma_device_to_drv_device(device, struct qedr_dev, ibdev);

	return sysfs_emit(buf, "FastLinQ QL%x %s\n", dev->pdev->device,
			  rdma_protocol_iwarp(&dev->ibdev, 1) ? "iWARP" :
								"RoCE");
}
static DEVICE_ATTR_RO(hca_type);

static struct attribute *qedr_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	NULL
};

static const struct attribute_group qedr_attr_group = {
	.attrs = qedr_attributes,
};

static const struct ib_device_ops qedr_iw_dev_ops = {
	.get_port_immutable = qedr_iw_port_immutable,
	.iw_accept = qedr_iw_accept,
	.iw_add_ref = qedr_iw_qp_add_ref,
	.iw_connect = qedr_iw_connect,
	.iw_create_listen = qedr_iw_create_listen,
	.iw_destroy_listen = qedr_iw_destroy_listen,
	.iw_get_qp = qedr_iw_get_qp,
	.iw_reject = qedr_iw_reject,
	.iw_rem_ref = qedr_iw_qp_rem_ref,
	.query_gid = qedr_iw_query_gid,
};

static int qedr_iw_register_device(struct qedr_dev *dev)
{
	dev->ibdev.node_type = RDMA_NODE_RNIC;

	ib_set_device_ops(&dev->ibdev, &qedr_iw_dev_ops);

	memcpy(dev->ibdev.iw_ifname,
	       dev->ndev->name, sizeof(dev->ibdev.iw_ifname));

	return 0;
}

static const struct ib_device_ops qedr_roce_dev_ops = {
	.alloc_xrcd = qedr_alloc_xrcd,
	.dealloc_xrcd = qedr_dealloc_xrcd,
	.get_port_immutable = qedr_roce_port_immutable,
	.query_pkey = qedr_query_pkey,
};

static void qedr_roce_register_device(struct qedr_dev *dev)
{
	dev->ibdev.node_type = RDMA_NODE_IB_CA;

	ib_set_device_ops(&dev->ibdev, &qedr_roce_dev_ops);
}

static const struct ib_device_ops qedr_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_QEDR,
	.uverbs_abi_ver = QEDR_ABI_VERSION,

	.alloc_mr = qedr_alloc_mr,
	.alloc_pd = qedr_alloc_pd,
	.alloc_ucontext = qedr_alloc_ucontext,
	.create_ah = qedr_create_ah,
	.create_cq = qedr_create_cq,
	.create_qp = qedr_create_qp,
	.create_srq = qedr_create_srq,
	.dealloc_pd = qedr_dealloc_pd,
	.dealloc_ucontext = qedr_dealloc_ucontext,
	.dereg_mr = qedr_dereg_mr,
	.destroy_ah = qedr_destroy_ah,
	.destroy_cq = qedr_destroy_cq,
	.destroy_qp = qedr_destroy_qp,
	.destroy_srq = qedr_destroy_srq,
	.device_group = &qedr_attr_group,
	.get_dev_fw_str = qedr_get_dev_fw_str,
	.get_dma_mr = qedr_get_dma_mr,
	.get_link_layer = qedr_link_layer,
	.map_mr_sg = qedr_map_mr_sg,
	.mmap = qedr_mmap,
	.mmap_free = qedr_mmap_free,
	.modify_qp = qedr_modify_qp,
	.modify_srq = qedr_modify_srq,
	.poll_cq = qedr_poll_cq,
	.post_recv = qedr_post_recv,
	.post_send = qedr_post_send,
	.post_srq_recv = qedr_post_srq_recv,
	.process_mad = qedr_process_mad,
	.query_device = qedr_query_device,
	.query_port = qedr_query_port,
	.query_qp = qedr_query_qp,
	.query_srq = qedr_query_srq,
	.reg_user_mr = qedr_reg_user_mr,
	.req_notify_cq = qedr_arm_cq,
	.resize_cq = qedr_resize_cq,

	INIT_RDMA_OBJ_SIZE(ib_ah, qedr_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, qedr_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, qedr_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_qp, qedr_qp, ibqp),
	INIT_RDMA_OBJ_SIZE(ib_srq, qedr_srq, ibsrq),
	INIT_RDMA_OBJ_SIZE(ib_xrcd, qedr_xrcd, ibxrcd),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, qedr_ucontext, ibucontext),
};

static int qedr_register_device(struct qedr_dev *dev)
{
	int rc;

	dev->ibdev.node_guid = dev->attr.node_guid;
	memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC));

	if (IS_IWARP(dev)) {
		rc = qedr_iw_register_device(dev);
		if (rc)
			return rc;
	} else {
		qedr_roce_register_device(dev);
	}

	dev->ibdev.phys_port_cnt = 1;
	dev->ibdev.num_comp_vectors = dev->num_cnq;
	dev->ibdev.dev.parent = &dev->pdev->dev;

	ib_set_device_ops(&dev->ibdev, &qedr_dev_ops);

	rc = ib_device_set_netdev(&dev->ibdev, dev->ndev, 1);
	if (rc)
		return rc;

	dma_set_max_seg_size(&dev->pdev->dev, UINT_MAX);
	return ib_register_device(&dev->ibdev, "qedr%d", &dev->pdev->dev);
}

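/* Allocate a DMA-coherent status block and register it with the qed core. */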
static int qedr_alloc_mem_sb(struct qedr_dev *dev,
			     struct qed_sb_info *sb_info, u16 sb_id)
{
	struct status_block_e4 *sb_virt;
	dma_addr_t sb_phys;
	int rc;

	sb_virt = dma_alloc_coherent(&dev->pdev->dev,
				     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
	if (!sb_virt)
		return -ENOMEM;

	rc = dev->ops->common->sb_init(dev->cdev, sb_info,
				       sb_virt, sb_phys, sb_id,
				       QED_SB_TYPE_CNQ);
	if (rc) {
		pr_err("Status block initialization failed\n");
		dma_free_coherent(&dev->pdev->dev, sizeof(*sb_virt),
				  sb_virt, sb_phys);
		return rc;
	}

	return 0;
}

static void qedr_free_mem_sb(struct qedr_dev *dev,
			     struct qed_sb_info *sb_info, int sb_id)
{
	if (sb_info->sb_virt) {
		dev->ops->common->sb_release(dev->cdev, sb_info, sb_id,
					     QED_SB_TYPE_CNQ);
		dma_free_coherent(&dev->pdev->dev, sizeof(*sb_info->sb_virt),
				  (void *)sb_info->sb_virt, sb_info->sb_phys);
	}
}

static void qedr_free_resources(struct qedr_dev *dev)
{
	int i;

	if (IS_IWARP(dev))
		destroy_workqueue(dev->iwarp_wq);

	for (i = 0; i < dev->num_cnq; i++) {
		qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
		dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
	}

	kfree(dev->cnq_array);
	kfree(dev->sb_array);
	kfree(dev->sgid_tbl);
}

static int qedr_alloc_resources(struct qedr_dev *dev)
{
	struct qed_chain_init_params params = {
		.mode = QED_CHAIN_MODE_PBL,
		.intended_use = QED_CHAIN_USE_TO_CONSUME,
		.cnt_type = QED_CHAIN_CNT_TYPE_U16,
		.elem_size = sizeof(struct regpair *),
	};
	struct qedr_cnq *cnq;
	__le16 *cons_pi;
	int i, rc;

	dev->sgid_tbl = kcalloc(QEDR_MAX_SGID, sizeof(union ib_gid),
				GFP_KERNEL);
	if (!dev->sgid_tbl)
		return -ENOMEM;

	spin_lock_init(&dev->sgid_lock);
	xa_init_flags(&dev->srqs, XA_FLAGS_LOCK_IRQ);

	if (IS_IWARP(dev)) {
		xa_init(&dev->qps);
		dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");
		if (!dev->iwarp_wq) {
			rc = -ENOMEM;
			goto err1;
		}
	}

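	/* Allocate one status block per CNQ. */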
	dev->sb_array = kcalloc(dev->num_cnq, sizeof(*dev->sb_array),
				GFP_KERNEL);
	if (!dev->sb_array) {
		rc = -ENOMEM;
		goto err1;
	}

	dev->cnq_array = kcalloc(dev->num_cnq,
				 sizeof(*dev->cnq_array), GFP_KERNEL);
	if (!dev->cnq_array) {
		rc = -ENOMEM;
		goto err2;
	}

	dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev);

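	/* Size each CNQ chain to the smaller of the qed and qedr limits. */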
	params.num_elems = min_t(u32, QED_RDMA_MAX_CNQ_SIZE,
				 QEDR_ROCE_MAX_CNQ_SIZE);

	for (i = 0; i < dev->num_cnq; i++) {
		cnq = &dev->cnq_array[i];

		rc = qedr_alloc_mem_sb(dev, &dev->sb_array[i],
				       dev->sb_start + i);
		if (rc)
			goto err3;

		rc = dev->ops->common->chain_alloc(dev->cdev, &cnq->pbl,
						   &params);
		if (rc)
			goto err4;

		cnq->dev = dev;
		cnq->sb = &dev->sb_array[i];
		cons_pi = dev->sb_array[i].sb_virt->pi_array;
		cnq->hw_cons_ptr = &cons_pi[QED_ROCE_PROTOCOL_INDEX];
		cnq->index = i;
		sprintf(cnq->name, "qedr%d@pci:%s", i, pci_name(dev->pdev));

		DP_DEBUG(dev, QEDR_MSG_INIT, "cnq[%d].cons=%d\n",
			 i, qed_chain_get_cons_idx(&cnq->pbl));
	}

	return 0;
err4:
	qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
err3:
	for (--i; i >= 0; i--) {
		dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
		qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
	}
	kfree(dev->cnq_array);
err2:
	kfree(dev->sb_array);
err1:
	kfree(dev->sgid_tbl);
	return rc;
}

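/*
 * Try to enable 64-bit PCIe AtomicOp completion toward the root port;
 * if the path does not support it, advertise no atomic capability.
 */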
static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev)
{
	int rc = pci_enable_atomic_ops_to_root(pdev,
					       PCI_EXP_DEVCAP2_ATOMIC_COMP64);

	if (rc) {
		dev->atomic_cap = IB_ATOMIC_NONE;
		DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability disabled\n");
	} else {
		dev->atomic_cap = IB_ATOMIC_GLOB;
		DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability enabled\n");
	}
}

static const struct qed_rdma_ops *qed_ops;

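/* Merge the hi/lo halves of a firmware regpair into one 64-bit value. */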
#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))

static irqreturn_t qedr_irq_handler(int irq, void *handle)
{
	u16 hw_comp_cons, sw_comp_cons;
	struct qedr_cnq *cnq = handle;
	struct regpair *cq_handle;
	struct qedr_cq *cq;

	qed_sb_ack(cnq->sb, IGU_INT_DISABLE, 0);

	qed_sb_update_sb_idx(cnq->sb);

	hw_comp_cons = le16_to_cpu(*cnq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);

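	/* Order the consumer-index reads above before the chain reads below. */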
	rmb();

	while (sw_comp_cons != hw_comp_cons) {
		cq_handle = (struct regpair *)qed_chain_consume(&cnq->pbl);
		cq = (struct qedr_cq *)(uintptr_t)HILO_U64(cq_handle->hi,
							   cq_handle->lo);

		if (cq == NULL) {
			DP_ERR(cnq->dev,
			       "Received NULL CQ cq_handle->hi=%d cq_handle->lo=%d sw_comp_cons=%d hw_comp_cons=%d\n",
			       cq_handle->hi, cq_handle->lo, sw_comp_cons,
			       hw_comp_cons);

			break;
		}

		if (cq->sig != QEDR_CQ_MAGIC_NUMBER) {
			DP_ERR(cnq->dev,
			       "Problem with cq signature, cq_handle->hi=%d cq_handle->lo=%d cq=%p\n",
			       cq_handle->hi, cq_handle->lo, cq);
			break;
		}

		cq->arm_flags = 0;

		if (!cq->destroyed && cq->ibcq.comp_handler)
			(*cq->ibcq.comp_handler)
				(&cq->ibcq, cq->ibcq.cq_context);

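		/*
		 * Bump cnq_notif only after the completion handler has
		 * returned; the CQ teardown path can then use this counter
		 * to make sure no handler is still running for the CQ.
		 */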
		cq->cnq_notif++;

		sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);

		cnq->n_comp++;
	}

	qed_ops->rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index,
				      sw_comp_cons);

	qed_sb_ack(cnq->sb, IGU_INT_ENABLE, 1);

	return IRQ_HANDLED;
}

static void qedr_sync_free_irqs(struct qedr_dev *dev)
{
	u32 vector;
	u16 idx;
	int i;

	for (i = 0; i < dev->int_info.used_cnt; i++) {
		if (dev->int_info.msix_cnt) {
			idx = i * dev->num_hwfns + dev->affin_hwfn_idx;
			vector = dev->int_info.msix[idx].vector;
			synchronize_irq(vector);
			free_irq(vector, &dev->cnq_array[i]);
		}
	}

	dev->int_info.used_cnt = 0;
}

static int qedr_req_msix_irqs(struct qedr_dev *dev)
{
	int i, rc = 0;
	u16 idx;

	if (dev->num_cnq > dev->int_info.msix_cnt) {
		DP_ERR(dev,
		       "Interrupt mismatch: %d CNQ queues > %d MSI-X vectors\n",
		       dev->num_cnq, dev->int_info.msix_cnt);
		return -EINVAL;
	}

	for (i = 0; i < dev->num_cnq; i++) {
		idx = i * dev->num_hwfns + dev->affin_hwfn_idx;
		rc = request_irq(dev->int_info.msix[idx].vector,
				 qedr_irq_handler, 0, dev->cnq_array[i].name,
				 &dev->cnq_array[i]);
		if (rc) {
			DP_ERR(dev, "Request cnq %d irq failed\n", i);
			qedr_sync_free_irqs(dev);
			return rc;
		}

		DP_DEBUG(dev, QEDR_MSG_INIT,
			 "Requested cnq irq for %s [entry %d]. Cookie is at %p\n",
			 dev->cnq_array[i].name, i,
			 &dev->cnq_array[i]);
		dev->int_info.used_cnt++;
	}

	return rc;
}

static int qedr_setup_irqs(struct qedr_dev *dev)
{
	int rc;

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs\n");

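	/* Tell qed how many CNQ vectors we need, then read back the grant. */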
	rc = dev->ops->rdma_set_rdma_int(dev->cdev, dev->num_cnq);
	if (rc < 0)
		return rc;

	rc = dev->ops->rdma_get_rdma_int(dev->cdev, &dev->int_info);
	if (rc) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "get_rdma_int failed\n");
		return rc;
	}

	if (dev->int_info.msix_cnt) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "rdma msix_cnt = %d\n",
			 dev->int_info.msix_cnt);
		rc = qedr_req_msix_irqs(dev);
		if (rc)
			return rc;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs succeeded\n");

	return 0;
}

static int qedr_set_device_attr(struct qedr_dev *dev)
{
	struct qed_rdma_device *qed_attr;
	struct qedr_device_attr *attr;
	u32 page_size;

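	/* Query device capabilities from the qed core. */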
	qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx);

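	/*
	 * page_size_caps is a bitmap of supported page sizes; ~caps + 1
	 * isolates the lowest set bit, i.e. the device's minimum page size.
	 */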
	page_size = ~qed_attr->page_size_caps + 1;
	if (page_size > PAGE_SIZE) {
		DP_ERR(dev,
		       "Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n",
		       PAGE_SIZE, page_size);
		return -ENODEV;
	}

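	/* Copy the qed capabilities, clamping queue depths to driver limits. */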
	attr = &dev->attr;
	attr->vendor_id = qed_attr->vendor_id;
	attr->vendor_part_id = qed_attr->vendor_part_id;
	attr->hw_ver = qed_attr->hw_ver;
	attr->fw_ver = qed_attr->fw_ver;
	attr->node_guid = qed_attr->node_guid;
	attr->sys_image_guid = qed_attr->sys_image_guid;
	attr->max_cnq = qed_attr->max_cnq;
	attr->max_sge = qed_attr->max_sge;
	attr->max_inline = qed_attr->max_inline;
	attr->max_sqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_SQE);
	attr->max_rqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_RQE);
	attr->max_qp_resp_rd_atomic_resc = qed_attr->max_qp_resp_rd_atomic_resc;
	attr->max_qp_req_rd_atomic_resc = qed_attr->max_qp_req_rd_atomic_resc;
	attr->max_dev_resp_rd_atomic_resc =
	    qed_attr->max_dev_resp_rd_atomic_resc;
	attr->max_cq = qed_attr->max_cq;
	attr->max_qp = qed_attr->max_qp;
	attr->max_mr = qed_attr->max_mr;
	attr->max_mr_size = qed_attr->max_mr_size;
	attr->max_cqe = min_t(u64, qed_attr->max_cqe, QEDR_MAX_CQES);
	attr->max_mw = qed_attr->max_mw;
	attr->max_mr_mw_fmr_pbl = qed_attr->max_mr_mw_fmr_pbl;
	attr->max_mr_mw_fmr_size = qed_attr->max_mr_mw_fmr_size;
	attr->max_pd = qed_attr->max_pd;
	attr->max_ah = qed_attr->max_ah;
	attr->max_pkey = qed_attr->max_pkey;
	attr->max_srq = qed_attr->max_srq;
	attr->max_srq_wr = qed_attr->max_srq_wr;
	attr->dev_caps = qed_attr->dev_caps;
	attr->page_size_caps = qed_attr->page_size_caps;
	attr->dev_ack_delay = qed_attr->dev_ack_delay;
	attr->reserved_lkey = qed_attr->reserved_lkey;
	attr->bad_pkey_counter = qed_attr->bad_pkey_counter;
	attr->max_stats_queues = qed_attr->max_stats_queues;

	return 0;
}

static void qedr_unaffiliated_event(void *context, u8 event_code)
{
	pr_err("unaffiliated event not implemented yet\n");
}

static void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
{
#define EVENT_TYPE_NOT_DEFINED	0
#define EVENT_TYPE_CQ		1
#define EVENT_TYPE_QP		2
#define EVENT_TYPE_SRQ		3
	struct qedr_dev *dev = (struct qedr_dev *)context;
	struct regpair *async_handle = (struct regpair *)fw_handle;
	u64 roce_handle64 = ((u64) async_handle->hi << 32) + async_handle->lo;
	u8 event_type = EVENT_TYPE_NOT_DEFINED;
	struct ib_event event;
	struct ib_srq *ibsrq;
	struct qedr_srq *srq;
	unsigned long flags;
	struct ib_cq *ibcq;
	struct ib_qp *ibqp;
	struct qedr_cq *cq;
	struct qedr_qp *qp;
	u16 srq_id;

	if (IS_ROCE(dev)) {
		switch (e_code) {
		case ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR:
			event.event = IB_EVENT_CQ_ERR;
			event_type = EVENT_TYPE_CQ;
			break;
		case ROCE_ASYNC_EVENT_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR:
			event.event = IB_EVENT_QP_FATAL;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR:
			event.event = IB_EVENT_QP_REQ_ERR;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_SRQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			event_type = EVENT_TYPE_SRQ;
			break;
		case ROCE_ASYNC_EVENT_SRQ_EMPTY:
			event.event = IB_EVENT_SRQ_ERR;
			event_type = EVENT_TYPE_SRQ;
			break;
		case ROCE_ASYNC_EVENT_XRC_DOMAIN_ERR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_INVALID_XRCETH_ERR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			event_type = EVENT_TYPE_QP;
			break;
		case ROCE_ASYNC_EVENT_XRC_SRQ_CATASTROPHIC_ERR:
			event.event = IB_EVENT_CQ_ERR;
			event_type = EVENT_TYPE_CQ;
			break;
		default:
			DP_ERR(dev, "unsupported event %d on handle=%llx\n",
			       e_code, roce_handle64);
		}
	} else {
		switch (e_code) {
		case QED_IWARP_EVENT_SRQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			event_type = EVENT_TYPE_SRQ;
			break;
		case QED_IWARP_EVENT_SRQ_EMPTY:
			event.event = IB_EVENT_SRQ_ERR;
			event_type = EVENT_TYPE_SRQ;
			break;
		default:
			DP_ERR(dev, "unsupported event %d on handle=%llx\n",
			       e_code, roce_handle64);
		}
	}
	switch (event_type) {
	case EVENT_TYPE_CQ:
		cq = (struct qedr_cq *)(uintptr_t)roce_handle64;
		if (cq) {
			ibcq = &cq->ibcq;
			if (ibcq->event_handler) {
				event.device = ibcq->device;
				event.element.cq = ibcq;
				ibcq->event_handler(&event, ibcq->cq_context);
			}
		} else {
			WARN(1,
			     "Error: CQ event with NULL pointer ibcq. Handle=%llx\n",
			     roce_handle64);
		}
		DP_ERR(dev, "CQ event %d on handle %p\n", e_code, cq);
		break;
	case EVENT_TYPE_QP:
		qp = (struct qedr_qp *)(uintptr_t)roce_handle64;
		if (qp) {
			ibqp = &qp->ibqp;
			if (ibqp->event_handler) {
				event.device = ibqp->device;
				event.element.qp = ibqp;
				ibqp->event_handler(&event, ibqp->qp_context);
			}
		} else {
			WARN(1,
			     "Error: QP event with NULL pointer ibqp. Handle=%llx\n",
			     roce_handle64);
		}
		DP_ERR(dev, "QP event %d on handle %p\n", e_code, qp);
		break;
	case EVENT_TYPE_SRQ:
		srq_id = (u16)roce_handle64;
		xa_lock_irqsave(&dev->srqs, flags);
		srq = xa_load(&dev->srqs, srq_id);
		if (srq) {
			ibsrq = &srq->ibsrq;
			if (ibsrq->event_handler) {
				event.device = ibsrq->device;
				event.element.srq = ibsrq;
				ibsrq->event_handler(&event,
						     ibsrq->srq_context);
			}
		} else {
			DP_NOTICE(dev,
				  "SRQ event with NULL pointer ibsrq. Handle=%llx\n",
				  roce_handle64);
		}
		xa_unlock_irqrestore(&dev->srqs, flags);
		DP_NOTICE(dev, "SRQ event %d on handle %p\n", e_code, srq);
		break;
	default:
		break;
	}
}

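/*
 * Start the RDMA engine: pass the CNQ PBLs and event callbacks to qed,
 * then register as a user to obtain a DPI for doorbells.
 */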
static int qedr_init_hw(struct qedr_dev *dev)
{
	struct qed_rdma_add_user_out_params out_params;
	struct qed_rdma_start_in_params *in_params;
	struct qed_rdma_cnq_params *cur_pbl;
	struct qed_rdma_events events;
	dma_addr_t p_phys_table;
	u32 page_cnt;
	int rc = 0;
	int i;

	in_params = kzalloc(sizeof(*in_params), GFP_KERNEL);
	if (!in_params) {
		rc = -ENOMEM;
		goto out;
	}

	in_params->desired_cnq = dev->num_cnq;
	for (i = 0; i < dev->num_cnq; i++) {
		cur_pbl = &in_params->cnq_pbl_list[i];

		page_cnt = qed_chain_get_page_cnt(&dev->cnq_array[i].pbl);
		cur_pbl->num_pbl_pages = page_cnt;

		p_phys_table = qed_chain_get_pbl_phys(&dev->cnq_array[i].pbl);
		cur_pbl->pbl_ptr = (u64)p_phys_table;
	}

	events.affiliated_event = qedr_affiliated_event;
	events.unaffiliated_event = qedr_unaffiliated_event;
	events.context = dev;

	in_params->events = &events;
	in_params->cq_mode = QED_RDMA_CQ_MODE_32_BITS;
	in_params->max_mtu = dev->ndev->mtu;
	dev->iwarp_max_mtu = dev->ndev->mtu;
	ether_addr_copy(&in_params->mac_addr[0], dev->ndev->dev_addr);

	rc = dev->ops->rdma_init(dev->cdev, in_params);
	if (rc)
		goto out;

	rc = dev->ops->rdma_add_user(dev->rdma_ctx, &out_params);
	if (rc)
		goto out;

	dev->db_addr = out_params.dpi_addr;
	dev->db_phys_addr = out_params.dpi_phys_addr;
	dev->db_size = out_params.dpi_size;
	dev->dpi = out_params.dpi;

	rc = qedr_set_device_attr(dev);
out:
	kfree(in_params);
	if (rc)
		DP_ERR(dev, "Init HW Failed rc = %d\n", rc);

	return rc;
}

static void qedr_stop_hw(struct qedr_dev *dev)
{
	dev->ops->rdma_remove_user(dev->rdma_ctx, dev->dpi);
	dev->ops->rdma_stop(dev->rdma_ctx);
}

static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
				 struct net_device *ndev)
{
	struct qed_dev_rdma_info dev_info;
	struct qedr_dev *dev;
	int rc = 0;

	dev = ib_alloc_device(qedr_dev, ibdev);
	if (!dev) {
		pr_err("Unable to allocate ib device\n");
		return NULL;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr add device called\n");

	dev->pdev = pdev;
	dev->ndev = ndev;
	dev->cdev = cdev;

	qed_ops = qed_get_rdma_ops();
	if (!qed_ops) {
		DP_ERR(dev, "Failed to get qed roce operations\n");
		goto init_err;
	}

	dev->ops = qed_ops;
	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		goto init_err;

	dev->user_dpm_enabled = dev_info.user_dpm_enabled;
	dev->rdma_type = dev_info.rdma_type;
	dev->num_hwfns = dev_info.common.num_hwfns;

	if (IS_IWARP(dev) && QEDR_IS_CMT(dev)) {
		rc = dev->ops->iwarp_set_engine_affin(cdev, false);
		if (rc) {
			DP_ERR(dev, "iWARP is disabled over a 100g device. Enabling it may impact L2 performance. To enable it, run 'devlink dev param set <dev> name iwarp_cmt value true cmode runtime'\n");
			goto init_err;
		}
	}
	dev->affin_hwfn_idx = dev->ops->common->get_affin_hwfn_idx(cdev);

	dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev);

	dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
	if (!dev->num_cnq) {
		DP_ERR(dev, "Failed. At least one CNQ is required.\n");
		rc = -ENOMEM;
		goto init_err;
	}

	dev->wq_multiplier = QEDR_WQ_MULTIPLIER_DFT;

	qedr_pci_set_atomic(dev, pdev);

	rc = qedr_alloc_resources(dev);
	if (rc)
		goto init_err;

	rc = qedr_init_hw(dev);
	if (rc)
		goto alloc_err;

	rc = qedr_setup_irqs(dev);
	if (rc)
		goto irq_err;

	rc = qedr_register_device(dev);
	if (rc) {
		DP_ERR(dev, "Unable to register device\n");
		goto reg_err;
	}

	if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
	return dev;

reg_err:
	qedr_sync_free_irqs(dev);
irq_err:
	qedr_stop_hw(dev);
alloc_err:
	qedr_free_resources(dev);
init_err:
	DP_ERR(dev, "qedr driver load failed rc=%d\n", rc);
	ib_dealloc_device(&dev->ibdev);

	return NULL;
}

static void qedr_remove(struct qedr_dev *dev)
{
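	/*
	 * Unregister from the IB core first so client activity stops
	 * before the hardware is torn down.
	 */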
	ib_unregister_device(&dev->ibdev);

	qedr_stop_hw(dev);
	qedr_sync_free_irqs(dev);
	qedr_free_resources(dev);

	if (IS_IWARP(dev) && QEDR_IS_CMT(dev))
		dev->ops->iwarp_set_engine_affin(dev->cdev, true);

	ib_dealloc_device(&dev->ibdev);
}

static void qedr_close(struct qedr_dev *dev)
{
	if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR);
}

static void qedr_shutdown(struct qedr_dev *dev)
{
	qedr_close(dev);
	qedr_remove(dev);
}

static void qedr_open(struct qedr_dev *dev)
{
	if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
}

static void qedr_mac_address_change(struct qedr_dev *dev)
{
	union ib_gid *sgid = &dev->sgid_tbl[0];
	u8 guid[8], mac_addr[6];
	int rc;

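	/* Rebuild GID 0 from the new MAC: an EUI-64 based link-local GID. */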
	ether_addr_copy(&mac_addr[0], dev->ndev->dev_addr);
	guid[0] = mac_addr[0] ^ 2;
	guid[1] = mac_addr[1];
	guid[2] = mac_addr[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = mac_addr[3];
	guid[6] = mac_addr[4];
	guid[7] = mac_addr[5];
	sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	memcpy(&sgid->raw[8], guid, sizeof(guid));

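	/* Repoint the LL2 MAC filter used for GSI traffic at the new address. */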
	rc = dev->ops->ll2_set_mac_filter(dev->cdev,
					  dev->gsi_ll2_mac_address,
					  dev->ndev->dev_addr);

	ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);

	qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE);

	if (rc)
		DP_ERR(dev, "Error updating mac filter\n");
}

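/* Handle link-state and configuration events delivered by the qede NIC driver. */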
static void qedr_notify(struct qedr_dev *dev, enum qede_rdma_event event)
{
	switch (event) {
	case QEDE_UP:
		qedr_open(dev);
		break;
	case QEDE_DOWN:
		qedr_close(dev);
		break;
	case QEDE_CLOSE:
		qedr_shutdown(dev);
		break;
	case QEDE_CHANGE_ADDR:
		qedr_mac_address_change(dev);
		break;
	case QEDE_CHANGE_MTU:
		if (rdma_protocol_iwarp(&dev->ibdev, 1))
			if (dev->ndev->mtu != dev->iwarp_max_mtu)
				DP_NOTICE(dev,
					  "MTU was changed from %d to %d. This will not take effect for iWARP until qedr is reloaded\n",
					  dev->iwarp_max_mtu, dev->ndev->mtu);
		break;
	default:
		pr_err("Event not supported\n");
	}
}

static struct qedr_driver qedr_drv = {
	.name = "qedr_driver",
	.add = qedr_add,
	.remove = qedr_remove,
	.notify = qedr_notify,
};

static int __init qedr_init_module(void)
{
	return qede_rdma_register_driver(&qedr_drv);
}

static void __exit qedr_exit_module(void)
{
	qede_rdma_unregister_driver(&qedr_drv);
}

module_init(qedr_init_module);
module_exit(qedr_exit_module);