#include <linux/errno.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <net/addrconf.h>

#include "pvrdma.h"

#define DRV_NAME	"vmw_pvrdma"
#define DRV_VERSION	"1.0.1.0-k"

static DEFINE_MUTEX(pvrdma_device_list_lock);
static LIST_HEAD(pvrdma_device_list);
static struct workqueue_struct *event_wq;

static int pvrdma_add_gid(const struct ib_gid_attr *attr, void **context);
static int pvrdma_del_gid(const struct ib_gid_attr *attr, void **context);

static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "VMW_PVRDMA-%s\n", DRV_VERSION);
}
static DEVICE_ATTR_RO(hca_type);

static ssize_t hw_rev_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", PVRDMA_REV_ID);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t board_id_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", PVRDMA_BOARD_ID);
}
static DEVICE_ATTR_RO(board_id);

static struct attribute *pvrdma_class_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	&dev_attr_board_id.attr,
	NULL,
};

static const struct attribute_group pvrdma_attr_group = {
	.attrs = pvrdma_class_attributes,
};

static void pvrdma_get_fw_ver_str(struct ib_device *device, char *str)
{
	struct pvrdma_dev *dev =
		container_of(device, struct pvrdma_dev, ib_dev);
	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d\n",
		 (int) (dev->dsr->caps.fw_ver >> 32),
		 (int) (dev->dsr->caps.fw_ver >> 16) & 0xffff,
		 (int) dev->dsr->caps.fw_ver & 0xffff);
}

static int pvrdma_init_device(struct pvrdma_dev *dev)
{
	/* Initialize some device related stuff */
	spin_lock_init(&dev->cmd_lock);
	sema_init(&dev->cmd_sema, 1);
	atomic_set(&dev->num_qps, 0);
	atomic_set(&dev->num_srqs, 0);
	atomic_set(&dev->num_cqs, 0);
	atomic_set(&dev->num_pds, 0);
	atomic_set(&dev->num_ahs, 0);

	return 0;
}

static int pvrdma_port_immutable(struct ib_device *ibdev, u8 port_num,
				 struct ib_port_immutable *immutable)
{
	struct pvrdma_dev *dev = to_vdev(ibdev);
	struct ib_port_attr attr;
	int err;

	if (dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V1)
		immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE;
	else if (dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V2)
		immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	return 0;
}

static const struct ib_device_ops pvrdma_dev_ops = {
	.add_gid = pvrdma_add_gid,
	.alloc_mr = pvrdma_alloc_mr,
	.alloc_pd = pvrdma_alloc_pd,
	.alloc_ucontext = pvrdma_alloc_ucontext,
	.create_ah = pvrdma_create_ah,
	.create_cq = pvrdma_create_cq,
	.create_qp = pvrdma_create_qp,
	.dealloc_pd = pvrdma_dealloc_pd,
	.dealloc_ucontext = pvrdma_dealloc_ucontext,
	.del_gid = pvrdma_del_gid,
	.dereg_mr = pvrdma_dereg_mr,
	.destroy_ah = pvrdma_destroy_ah,
	.destroy_cq = pvrdma_destroy_cq,
	.destroy_qp = pvrdma_destroy_qp,
	.get_dev_fw_str = pvrdma_get_fw_ver_str,
	.get_dma_mr = pvrdma_get_dma_mr,
	.get_link_layer = pvrdma_port_link_layer,
	.get_port_immutable = pvrdma_port_immutable,
	.map_mr_sg = pvrdma_map_mr_sg,
	.mmap = pvrdma_mmap,
	.modify_port = pvrdma_modify_port,
	.modify_qp = pvrdma_modify_qp,
	.poll_cq = pvrdma_poll_cq,
	.post_recv = pvrdma_post_recv,
	.post_send = pvrdma_post_send,
	.query_device = pvrdma_query_device,
	.query_gid = pvrdma_query_gid,
	.query_pkey = pvrdma_query_pkey,
	.query_port = pvrdma_query_port,
	.query_qp = pvrdma_query_qp,
	.reg_user_mr = pvrdma_reg_user_mr,
	.req_notify_cq = pvrdma_req_notify_cq,

	INIT_RDMA_OBJ_SIZE(ib_ah, pvrdma_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_pd, pvrdma_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, pvrdma_ucontext, ibucontext),
};

static const struct ib_device_ops pvrdma_dev_srq_ops = {
	.create_srq = pvrdma_create_srq,
	.destroy_srq = pvrdma_destroy_srq,
	.modify_srq = pvrdma_modify_srq,
	.query_srq = pvrdma_query_srq,

	INIT_RDMA_OBJ_SIZE(ib_srq, pvrdma_srq, ibsrq),
};

static int pvrdma_register_device(struct pvrdma_dev *dev)
{
	int ret = -ENOMEM;

	dev->ib_dev.node_guid = dev->dsr->caps.node_guid;
	dev->sys_image_guid = dev->dsr->caps.sys_image_guid;
	dev->flags = 0;
	dev->ib_dev.owner = THIS_MODULE;
	dev->ib_dev.num_comp_vectors = 1;
	dev->ib_dev.dev.parent = &dev->pdev->dev;
	dev->ib_dev.uverbs_abi_ver = PVRDMA_UVERBS_ABI_VERSION;
	dev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_POLL_CQ) |
		(1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_POST_SEND) |
		(1ull << IB_USER_VERBS_CMD_POST_RECV) |
		(1ull << IB_USER_VERBS_CMD_CREATE_AH) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_AH);

	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
	dev->ib_dev.phys_port_cnt = dev->dsr->caps.phys_port_cnt;

	ib_set_device_ops(&dev->ib_dev, &pvrdma_dev_ops);

	mutex_init(&dev->port_mutex);
	spin_lock_init(&dev->desc_lock);

	dev->cq_tbl = kcalloc(dev->dsr->caps.max_cq, sizeof(struct pvrdma_cq *),
			      GFP_KERNEL);
	if (!dev->cq_tbl)
		return ret;
	spin_lock_init(&dev->cq_tbl_lock);

	dev->qp_tbl = kcalloc(dev->dsr->caps.max_qp, sizeof(struct pvrdma_qp *),
			      GFP_KERNEL);
	if (!dev->qp_tbl)
		goto err_cq_free;
	spin_lock_init(&dev->qp_tbl_lock);

	/* Check if SRQ is supported by backend */
	if (dev->dsr->caps.max_srq) {
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
			(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
			(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
			(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
			(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);

		ib_set_device_ops(&dev->ib_dev, &pvrdma_dev_srq_ops);

		dev->srq_tbl = kcalloc(dev->dsr->caps.max_srq,
				       sizeof(struct pvrdma_srq *),
				       GFP_KERNEL);
		if (!dev->srq_tbl)
			goto err_qp_free;
	}
	dev->ib_dev.driver_id = RDMA_DRIVER_VMW_PVRDMA;
	ret = ib_device_set_netdev(&dev->ib_dev, dev->netdev, 1);
	if (ret)
		goto err_srq_free;
	spin_lock_init(&dev->srq_tbl_lock);
	rdma_set_device_sysfs_group(&dev->ib_dev, &pvrdma_attr_group);

	ret = ib_register_device(&dev->ib_dev, "vmw_pvrdma%d");
	if (ret)
		goto err_srq_free;

	dev->ib_active = true;

	return 0;

err_srq_free:
	kfree(dev->srq_tbl);
err_qp_free:
	kfree(dev->qp_tbl);
err_cq_free:
	kfree(dev->cq_tbl);

	return ret;
}

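/* Interrupt 0 signals completion of a command posted to the device. */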
static irqreturn_t pvrdma_intr0_handler(int irq, void *dev_id)
{
	u32 icr = PVRDMA_INTR_CAUSE_RESPONSE;
	struct pvrdma_dev *dev = dev_id;

	dev_dbg(&dev->pdev->dev, "interrupt 0 (response) handler\n");

	if (!dev->pdev->msix_enabled) {
		/* Legacy intr */
		icr = pvrdma_read_reg(dev, PVRDMA_REG_ICR);
		if (icr == 0)
			return IRQ_NONE;
	}

	if (icr == PVRDMA_INTR_CAUSE_RESPONSE)
		complete(&dev->cmd_done);

	return IRQ_HANDLED;
}

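/*
 * Look up an object in its table, take a temporary reference, invoke the
 * consumer's event handler, then drop the reference; the final put wakes
 * any destroy path waiting on the object's completion.
 */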
static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
{
	struct pvrdma_qp *qp;
	unsigned long flags;

	spin_lock_irqsave(&dev->qp_tbl_lock, flags);
	qp = dev->qp_tbl[qpn % dev->dsr->caps.max_qp];
	if (qp)
		refcount_inc(&qp->refcnt);
	spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);

	if (qp && qp->ibqp.event_handler) {
		struct ib_qp *ibqp = &qp->ibqp;
		struct ib_event e;

		e.device = ibqp->device;
		e.element.qp = ibqp;
		e.event = type; /* 1:1 mapping for currently supported types */
		ibqp->event_handler(&e, ibqp->qp_context);
	}
	if (qp) {
		if (refcount_dec_and_test(&qp->refcnt))
			complete(&qp->free);
	}
}

static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
{
	struct pvrdma_cq *cq;
	unsigned long flags;

	spin_lock_irqsave(&dev->cq_tbl_lock, flags);
	cq = dev->cq_tbl[cqn % dev->dsr->caps.max_cq];
	if (cq)
		refcount_inc(&cq->refcnt);
	spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

	if (cq && cq->ibcq.event_handler) {
		struct ib_cq *ibcq = &cq->ibcq;
		struct ib_event e;

		e.device = ibcq->device;
		e.element.cq = ibcq;
		e.event = type; /* 1:1 mapping for currently supported types */
		ibcq->event_handler(&e, ibcq->cq_context);
	}
	if (cq) {
		if (refcount_dec_and_test(&cq->refcnt))
			complete(&cq->free);
	}
}

static void pvrdma_srq_event(struct pvrdma_dev *dev, u32 srqn, int type)
{
	struct pvrdma_srq *srq;
	unsigned long flags;

	spin_lock_irqsave(&dev->srq_tbl_lock, flags);
	if (dev->srq_tbl)
		srq = dev->srq_tbl[srqn % dev->dsr->caps.max_srq];
	else
		srq = NULL;
	if (srq)
		refcount_inc(&srq->refcnt);
	spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);

	if (srq && srq->ibsrq.event_handler) {
		struct ib_srq *ibsrq = &srq->ibsrq;
		struct ib_event e;

		e.device = ibsrq->device;
		e.element.srq = ibsrq;
		e.event = type; /* 1:1 mapping for currently supported types */
		ibsrq->event_handler(&e, ibsrq->srq_context);
	}
	if (srq) {
		if (refcount_dec_and_test(&srq->refcnt))
			complete(&srq->free);
	}
}

static void pvrdma_dispatch_event(struct pvrdma_dev *dev, int port,
				  enum ib_event_type event)
{
	struct ib_event ib_event;

	memset(&ib_event, 0, sizeof(ib_event));
	ib_event.device = &dev->ib_dev;
	ib_event.element.port_num = port;
	ib_event.event = event;
	ib_dispatch_event(&ib_event);
}

static void pvrdma_dev_event(struct pvrdma_dev *dev, u8 port, int type)
{
	if (port < 1 || port > dev->dsr->caps.phys_port_cnt) {
		dev_warn(&dev->pdev->dev, "event on port %d\n", port);
		return;
	}

	pvrdma_dispatch_event(dev, port, type);
}

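/* The ring state header occupies the first page; event entries follow it. */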
static inline struct pvrdma_eqe *get_eqe(struct pvrdma_dev *dev, unsigned int i)
{
	return (struct pvrdma_eqe *)pvrdma_page_dir_get_ptr(
					&dev->async_pdir,
					PAGE_SIZE +
					sizeof(struct pvrdma_eqe) * i);
}

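/* Interrupt 1 drains the async event ring and fans events out by type. */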
static irqreturn_t pvrdma_intr1_handler(int irq, void *dev_id)
{
	struct pvrdma_dev *dev = dev_id;
	struct pvrdma_ring *ring = &dev->async_ring_state->rx;
	int ring_slots = (dev->dsr->async_ring_pages.num_pages - 1) *
			 PAGE_SIZE / sizeof(struct pvrdma_eqe);
	unsigned int head;

	dev_dbg(&dev->pdev->dev, "interrupt 1 (async event) handler\n");

	/*
	 * Don't process events until the IB device is registered. Otherwise
	 * we'll try to dispatch events on an invalid device.
	 */
	if (!dev->ib_active)
		return IRQ_HANDLED;

	while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) {
		struct pvrdma_eqe *eqe;

		eqe = get_eqe(dev, head);

		switch (eqe->type) {
		case PVRDMA_EVENT_QP_FATAL:
		case PVRDMA_EVENT_QP_REQ_ERR:
		case PVRDMA_EVENT_QP_ACCESS_ERR:
		case PVRDMA_EVENT_COMM_EST:
		case PVRDMA_EVENT_SQ_DRAINED:
		case PVRDMA_EVENT_PATH_MIG:
		case PVRDMA_EVENT_PATH_MIG_ERR:
		case PVRDMA_EVENT_QP_LAST_WQE_REACHED:
			pvrdma_qp_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_CQ_ERR:
			pvrdma_cq_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_SRQ_ERR:
		case PVRDMA_EVENT_SRQ_LIMIT_REACHED:
			pvrdma_srq_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_PORT_ACTIVE:
		case PVRDMA_EVENT_PORT_ERR:
		case PVRDMA_EVENT_LID_CHANGE:
		case PVRDMA_EVENT_PKEY_CHANGE:
		case PVRDMA_EVENT_SM_CHANGE:
		case PVRDMA_EVENT_CLIENT_REREGISTER:
		case PVRDMA_EVENT_GID_CHANGE:
			pvrdma_dev_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_DEVICE_FATAL:
			pvrdma_dev_event(dev, 1, eqe->type);
			break;

		default:
			break;
		}

		pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
	}

	return IRQ_HANDLED;
}

static inline struct pvrdma_cqne *get_cqne(struct pvrdma_dev *dev,
					   unsigned int i)
{
	return (struct pvrdma_cqne *)pvrdma_page_dir_get_ptr(
					&dev->cq_pdir,
					PAGE_SIZE +
					sizeof(struct pvrdma_cqne) * i);
}

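/* Remaining vectors drain the CQ notification ring and run comp handlers. */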
static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
{
	struct pvrdma_dev *dev = dev_id;
	struct pvrdma_ring *ring = &dev->cq_ring_state->rx;
	int ring_slots = (dev->dsr->cq_ring_pages.num_pages - 1) * PAGE_SIZE /
			 sizeof(struct pvrdma_cqne);
	unsigned int head;
	unsigned long flags;

	dev_dbg(&dev->pdev->dev, "interrupt x (completion) handler\n");

	while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) {
		struct pvrdma_cqne *cqne;
		struct pvrdma_cq *cq;

		cqne = get_cqne(dev, head);
		spin_lock_irqsave(&dev->cq_tbl_lock, flags);
		cq = dev->cq_tbl[cqne->info % dev->dsr->caps.max_cq];
		if (cq)
			refcount_inc(&cq->refcnt);
		spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

		if (cq && cq->ibcq.comp_handler)
			cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
		if (cq) {
			if (refcount_dec_and_test(&cq->refcnt))
				complete(&cq->free);
		}
		pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
	}

	return IRQ_HANDLED;
}

static void pvrdma_free_irq(struct pvrdma_dev *dev)
{
	int i;

	dev_dbg(&dev->pdev->dev, "freeing interrupts\n");
	for (i = 0; i < dev->nr_vectors; i++)
		free_irq(pci_irq_vector(dev->pdev, i), dev);
}

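/* The interrupt mask register masks all interrupts when ~0, none when 0. */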
static void pvrdma_enable_intrs(struct pvrdma_dev *dev)
{
	dev_dbg(&dev->pdev->dev, "enable interrupts\n");
	pvrdma_write_reg(dev, PVRDMA_REG_IMR, 0);
}

static void pvrdma_disable_intrs(struct pvrdma_dev *dev)
{
	dev_dbg(&dev->pdev->dev, "disable interrupts\n");
	pvrdma_write_reg(dev, PVRDMA_REG_IMR, ~0);
}

static int pvrdma_alloc_intrs(struct pvrdma_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;
	int ret = 0, i;

	/* Try MSI-X first, then fall back to a single MSI/legacy vector. */
	ret = pci_alloc_irq_vectors(pdev, 1, PVRDMA_MAX_INTERRUPTS,
				    PCI_IRQ_MSIX);
	if (ret < 0) {
		ret = pci_alloc_irq_vectors(pdev, 1, 1,
					    PCI_IRQ_MSI | PCI_IRQ_LEGACY);
		if (ret < 0)
			return ret;
	}
	dev->nr_vectors = ret;

	ret = request_irq(pci_irq_vector(dev->pdev, 0), pvrdma_intr0_handler,
			  pdev->msix_enabled ? 0 : IRQF_SHARED, DRV_NAME, dev);
	if (ret) {
		dev_err(&dev->pdev->dev,
			"failed to request interrupt 0\n");
		goto out_free_vectors;
	}

	for (i = 1; i < dev->nr_vectors; i++) {
		ret = request_irq(pci_irq_vector(dev->pdev, i),
				  i == 1 ? pvrdma_intr1_handler :
					   pvrdma_intrx_handler,
				  0, DRV_NAME, dev);
		if (ret) {
			dev_err(&dev->pdev->dev,
				"failed to request interrupt %d\n", i);
			goto free_irqs;
		}
	}

	return 0;

free_irqs:
	while (--i >= 0)
		free_irq(pci_irq_vector(dev->pdev, i), dev);
out_free_vectors:
	pci_free_irq_vectors(pdev);
	return ret;
}

static void pvrdma_free_slots(struct pvrdma_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;

	if (dev->resp_slot)
		dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->resp_slot,
				  dev->dsr->resp_slot_dma);
	if (dev->cmd_slot)
		dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->cmd_slot,
				  dev->dsr->cmd_slot_dma);
}

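/* Program a GID table entry by posting a CREATE_BIND command to the device. */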
static int pvrdma_add_gid_at_index(struct pvrdma_dev *dev,
				   const union ib_gid *gid,
				   u8 gid_type,
				   int index)
{
	int ret;
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_create_bind *cmd_bind = &req.create_bind;

	if (!dev->sgid_tbl) {
		dev_warn(&dev->pdev->dev, "sgid table not initialized\n");
		return -EINVAL;
	}

	memset(cmd_bind, 0, sizeof(*cmd_bind));
	cmd_bind->hdr.cmd = PVRDMA_CMD_CREATE_BIND;
	memcpy(cmd_bind->new_gid, gid->raw, 16);
	cmd_bind->mtu = ib_mtu_enum_to_int(IB_MTU_1024);
	cmd_bind->vlan = 0xfff;
	cmd_bind->index = index;
	cmd_bind->gid_type = gid_type;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not create binding, error: %d\n", ret);
		return -EFAULT;
	}
	memcpy(&dev->sgid_tbl[index], gid, sizeof(*gid));
	return 0;
}

static int pvrdma_add_gid(const struct ib_gid_attr *attr, void **context)
{
	struct pvrdma_dev *dev = to_vdev(attr->device);

	return pvrdma_add_gid_at_index(dev, &attr->gid,
				       ib_gid_type_to_pvrdma(attr->gid_type),
				       attr->index);
}

static int pvrdma_del_gid_at_index(struct pvrdma_dev *dev, int index)
{
	int ret;
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_destroy_bind *cmd_dest = &req.destroy_bind;

	/* Update sgid table. */
	if (!dev->sgid_tbl) {
		dev_warn(&dev->pdev->dev, "sgid table not initialized\n");
		return -EINVAL;
	}

	memset(cmd_dest, 0, sizeof(*cmd_dest));
	cmd_dest->hdr.cmd = PVRDMA_CMD_DESTROY_BIND;
	memcpy(cmd_dest->dest_gid, &dev->sgid_tbl[index], 16);
	cmd_dest->index = index;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not destroy binding, error: %d\n", ret);
		return ret;
	}
	memset(&dev->sgid_tbl[index], 0, 16);
	return 0;
}

static int pvrdma_del_gid(const struct ib_gid_attr *attr, void **context)
{
	struct pvrdma_dev *dev = to_vdev(attr->device);

	dev_dbg(&dev->pdev->dev, "removing gid at index %u from %s",
		attr->index, dev->netdev->name);

	return pvrdma_del_gid_at_index(dev, attr->index);
}

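/* Handle a deferred netdevice event for one paired pvrdma device. */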
static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
					  struct net_device *ndev,
					  unsigned long event)
{
	struct pci_dev *pdev_net;
	unsigned int slot;

	switch (event) {
	case NETDEV_REBOOT:
	case NETDEV_DOWN:
		pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
		break;
	case NETDEV_UP:
		pvrdma_write_reg(dev, PVRDMA_REG_CTL,
				 PVRDMA_DEVICE_CTL_UNQUIESCE);

		mb();

		if (pvrdma_read_reg(dev, PVRDMA_REG_ERR))
			dev_err(&dev->pdev->dev,
				"failed to activate device during link up\n");
		else
			pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
		break;
	case NETDEV_UNREGISTER:
		ib_device_set_netdev(&dev->ib_dev, NULL, 1);
		dev_put(dev->netdev);
		dev->netdev = NULL;
		break;
	case NETDEV_REGISTER:
		/* vmxnet3 will have same bus, slot. But func will be 0 */
		slot = PCI_SLOT(dev->pdev->devfn);
		pdev_net = pci_get_slot(dev->pdev->bus,
					PCI_DEVFN(slot, 0));
		if ((dev->netdev == NULL) &&
		    (pci_get_drvdata(pdev_net) == ndev)) {
			/* this is our netdev */
			ib_device_set_netdev(&dev->ib_dev, ndev, 1);
			dev->netdev = ndev;
			dev_hold(ndev);
		}
		pci_dev_put(pdev_net);
		break;

	default:
		dev_dbg(&dev->pdev->dev, "ignore netdevice event %ld on %s\n",
			event, dev_name(&dev->ib_dev.dev));
		break;
	}
}

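/*
 * Netdevice notifiers run in atomic context, so events are queued and
 * handled on the ordered event workqueue under the device list lock.
 */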
static void pvrdma_netdevice_event_work(struct work_struct *work)
{
	struct pvrdma_netdevice_work *netdev_work;
	struct pvrdma_dev *dev;

	netdev_work = container_of(work, struct pvrdma_netdevice_work, work);

	mutex_lock(&pvrdma_device_list_lock);
	list_for_each_entry(dev, &pvrdma_device_list, device_link) {
		if ((netdev_work->event == NETDEV_REGISTER) ||
		    (dev->netdev == netdev_work->event_netdev)) {
			pvrdma_netdevice_event_handle(dev,
						      netdev_work->event_netdev,
						      netdev_work->event);
			break;
		}
	}
	mutex_unlock(&pvrdma_device_list_lock);

	kfree(netdev_work);
}

static int pvrdma_netdevice_event(struct notifier_block *this,
				  unsigned long event, void *ptr)
{
	struct net_device *event_netdev = netdev_notifier_info_to_dev(ptr);
	struct pvrdma_netdevice_work *netdev_work;

	netdev_work = kmalloc(sizeof(*netdev_work), GFP_ATOMIC);
	if (!netdev_work)
		return NOTIFY_BAD;

	INIT_WORK(&netdev_work->work, pvrdma_netdevice_event_work);
	netdev_work->event_netdev = event_netdev;
	netdev_work->event = event;
	queue_work(event_wq, &netdev_work->work);

	return NOTIFY_DONE;
}

static int pvrdma_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *id)
{
	struct pci_dev *pdev_net;
	struct pvrdma_dev *dev;
	int ret;
	unsigned long start;
	unsigned long len;
	dma_addr_t slot_dma = 0;

	dev_dbg(&pdev->dev, "initializing driver %s\n", pci_name(pdev));

	/* Allocate zero-out device */
	dev = ib_alloc_device(pvrdma_dev, ib_dev);
	if (!dev) {
		dev_err(&pdev->dev, "failed to allocate IB device\n");
		return -ENOMEM;
	}

	mutex_lock(&pvrdma_device_list_lock);
	list_add(&dev->device_link, &pvrdma_device_list);
	mutex_unlock(&pvrdma_device_list_lock);

	ret = pvrdma_init_device(dev);
	if (ret)
		goto err_free_device;

	dev->pdev = pdev;
	pci_set_drvdata(pdev, dev);

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto err_free_device;
	}

	dev_dbg(&pdev->dev, "PCI resource flags BAR0 %#lx\n",
		pci_resource_flags(pdev, 0));
	dev_dbg(&pdev->dev, "PCI resource len %#llx\n",
		(unsigned long long)pci_resource_len(pdev, 0));
	dev_dbg(&pdev->dev, "PCI resource start %#llx\n",
		(unsigned long long)pci_resource_start(pdev, 0));
	dev_dbg(&pdev->dev, "PCI resource flags BAR1 %#lx\n",
		pci_resource_flags(pdev, 1));
	dev_dbg(&pdev->dev, "PCI resource len %#llx\n",
		(unsigned long long)pci_resource_len(pdev, 1));
	dev_dbg(&pdev->dev, "PCI resource start %#llx\n",
		(unsigned long long)pci_resource_start(pdev, 1));

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
	    !(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "PCI BAR region not MMIO\n");
		ret = -ENOMEM;
		goto err_disable_pdev;
	}

	ret = pci_request_regions(pdev, DRV_NAME);
	if (ret) {
		dev_err(&pdev->dev, "cannot request PCI resources\n");
		goto err_disable_pdev;
	}

	/* Enable 64-Bit DMA */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (ret != 0) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed\n");
			goto err_free_resource;
		}
	} else {
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (ret != 0) {
			dev_err(&pdev->dev,
				"pci_set_dma_mask failed\n");
			goto err_free_resource;
		}
	}

	pci_set_master(pdev);

	/* Map register space */
	start = pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_REG);
	len = pci_resource_len(dev->pdev, PVRDMA_PCI_RESOURCE_REG);
	dev->regs = ioremap(start, len);
	if (!dev->regs) {
		dev_err(&pdev->dev, "register mapping failed\n");
		ret = -ENOMEM;
		goto err_free_resource;
	}

	/* Setup per-device UAR. */
	dev->driver_uar.index = 0;
	dev->driver_uar.pfn =
		pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_UAR) >>
		PAGE_SHIFT;
	dev->driver_uar.map =
		ioremap(dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!dev->driver_uar.map) {
		dev_err(&pdev->dev, "failed to remap UAR pages\n");
		ret = -ENOMEM;
		goto err_unmap_regs;
	}

	dev->dsr_version = pvrdma_read_reg(dev, PVRDMA_REG_VERSION);
	dev_info(&pdev->dev, "device version %d, driver version %d\n",
		 dev->dsr_version, PVRDMA_VERSION);

	dev->dsr = dma_alloc_coherent(&pdev->dev, sizeof(*dev->dsr),
				      &dev->dsrbase, GFP_KERNEL);
	if (!dev->dsr) {
		dev_err(&pdev->dev, "failed to allocate shared region\n");
		ret = -ENOMEM;
		goto err_uar_unmap;
	}

	/* Setup the shared region */
	dev->dsr->driver_version = PVRDMA_VERSION;
	dev->dsr->gos_info.gos_bits = sizeof(void *) == 4 ?
		PVRDMA_GOS_BITS_32 :
		PVRDMA_GOS_BITS_64;
	dev->dsr->gos_info.gos_type = PVRDMA_GOS_TYPE_LINUX;
	dev->dsr->gos_info.gos_ver = 1;

	if (dev->dsr_version < PVRDMA_PPN64_VERSION)
		dev->dsr->uar_pfn = dev->driver_uar.pfn;
	else
		dev->dsr->uar_pfn64 = dev->driver_uar.pfn;

	/* Command slot. */
	dev->cmd_slot = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
					   &slot_dma, GFP_KERNEL);
	if (!dev->cmd_slot) {
		ret = -ENOMEM;
		goto err_free_dsr;
	}

	dev->dsr->cmd_slot_dma = (u64)slot_dma;

	/* Response slot. */
	dev->resp_slot = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
					    &slot_dma, GFP_KERNEL);
	if (!dev->resp_slot) {
		ret = -ENOMEM;
		goto err_free_slots;
	}

	dev->dsr->resp_slot_dma = (u64)slot_dma;

	/* Async event ring */
	dev->dsr->async_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
	ret = pvrdma_page_dir_init(dev, &dev->async_pdir,
				   dev->dsr->async_ring_pages.num_pages, true);
	if (ret)
		goto err_free_slots;
	dev->async_ring_state = dev->async_pdir.pages[0];
	dev->dsr->async_ring_pages.pdir_dma = dev->async_pdir.dir_dma;

	/* CQ notification ring */
	dev->dsr->cq_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
	ret = pvrdma_page_dir_init(dev, &dev->cq_pdir,
				   dev->dsr->cq_ring_pages.num_pages, true);
	if (ret)
		goto err_free_async_ring;
	dev->cq_ring_state = dev->cq_pdir.pages[0];
	dev->dsr->cq_ring_pages.pdir_dma = dev->cq_pdir.dir_dma;

	/*
	 * Write the PA of the shared region to the device. The writes must be
	 * ordered such that the high bits are written last. When the writes
	 * complete, the device will have filled out the capabilities.
	 */
	pvrdma_write_reg(dev, PVRDMA_REG_DSRLOW, (u32)dev->dsrbase);
	pvrdma_write_reg(dev, PVRDMA_REG_DSRHIGH,
			 (u32)((u64)(dev->dsrbase) >> 32));

	/* Make sure the write is complete before reading status. */
	mb();

	/* The driver supports RoCE V1 and V2. */
	if (!PVRDMA_SUPPORTED(dev)) {
		dev_err(&pdev->dev, "driver needs RoCE v1 or v2 support\n");
		ret = -EFAULT;
		goto err_free_cq_ring;
	}

	/* Paired vmxnet3 will have same bus, slot. But func will be 0 */
	pdev_net = pci_get_slot(pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
	if (!pdev_net) {
		dev_err(&pdev->dev, "failed to find paired net device\n");
		ret = -ENODEV;
		goto err_free_cq_ring;
	}

	if (pdev_net->vendor != PCI_VENDOR_ID_VMWARE ||
	    pdev_net->device != PCI_DEVICE_ID_VMWARE_VMXNET3) {
		dev_err(&pdev->dev, "failed to find paired vmxnet3 device\n");
		pci_dev_put(pdev_net);
		ret = -ENODEV;
		goto err_free_cq_ring;
	}

	dev->netdev = pci_get_drvdata(pdev_net);
	pci_dev_put(pdev_net);
	if (!dev->netdev) {
		dev_err(&pdev->dev, "failed to get vmxnet3 device\n");
		ret = -ENODEV;
		goto err_free_cq_ring;
	}
	dev_hold(dev->netdev);

	dev_info(&pdev->dev, "paired device to %s\n", dev->netdev->name);

	/* Interrupt setup */
	ret = pvrdma_alloc_intrs(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to allocate interrupts\n");
		ret = -ENOMEM;
		goto err_free_cq_ring;
	}

	/* Allocate UAR table. */
	ret = pvrdma_uar_table_init(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to allocate UAR table\n");
		ret = -ENOMEM;
		goto err_free_intrs;
	}

	/* Allocate GID table */
	dev->sgid_tbl = kcalloc(dev->dsr->caps.gid_tbl_len,
				sizeof(union ib_gid), GFP_KERNEL);
	if (!dev->sgid_tbl) {
		ret = -ENOMEM;
		goto err_free_uar_table;
	}
	dev_dbg(&pdev->dev, "gid table len %d\n", dev->dsr->caps.gid_tbl_len);

	pvrdma_enable_intrs(dev);

	/* Activate pvrdma device */
	pvrdma_write_reg(dev, PVRDMA_REG_CTL, PVRDMA_DEVICE_CTL_ACTIVATE);

	/* Make sure the write is complete before reading status. */
	mb();

	/* Check if device was successfully activated */
	ret = pvrdma_read_reg(dev, PVRDMA_REG_ERR);
	if (ret != 0) {
		dev_err(&pdev->dev, "failed to activate device\n");
		ret = -EFAULT;
		goto err_disable_intr;
	}

	/* Register IB device */
	ret = pvrdma_register_device(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register IB device\n");
		goto err_disable_intr;
	}

	dev->nb_netdev.notifier_call = pvrdma_netdevice_event;
	ret = register_netdevice_notifier(&dev->nb_netdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register netdevice events\n");
		goto err_unreg_ibdev;
	}

	dev_info(&pdev->dev, "attached to device\n");
	return 0;

err_unreg_ibdev:
	ib_unregister_device(&dev->ib_dev);
err_disable_intr:
	pvrdma_disable_intrs(dev);
	kfree(dev->sgid_tbl);
err_free_uar_table:
	pvrdma_uar_table_cleanup(dev);
err_free_intrs:
	pvrdma_free_irq(dev);
	pci_free_irq_vectors(pdev);
err_free_cq_ring:
	if (dev->netdev) {
		dev_put(dev->netdev);
		dev->netdev = NULL;
	}
	pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
err_free_async_ring:
	pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
err_free_slots:
	pvrdma_free_slots(dev);
err_free_dsr:
	dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr,
			  dev->dsrbase);
err_uar_unmap:
	iounmap(dev->driver_uar.map);
err_unmap_regs:
	iounmap(dev->regs);
err_free_resource:
	pci_release_regions(pdev);
err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
err_free_device:
	mutex_lock(&pvrdma_device_list_lock);
	list_del(&dev->device_link);
	mutex_unlock(&pvrdma_device_list_lock);
	ib_dealloc_device(&dev->ib_dev);
	return ret;
}

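/* Tear down in roughly the reverse order of probe, resetting the device. */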
static void pvrdma_pci_remove(struct pci_dev *pdev)
{
	struct pvrdma_dev *dev = pci_get_drvdata(pdev);

	if (!dev)
		return;

	dev_info(&pdev->dev, "detaching from device\n");

	unregister_netdevice_notifier(&dev->nb_netdev);
	dev->nb_netdev.notifier_call = NULL;

	flush_workqueue(event_wq);

	if (dev->netdev) {
		dev_put(dev->netdev);
		dev->netdev = NULL;
	}

	/* Unregister ib device */
	ib_unregister_device(&dev->ib_dev);

	mutex_lock(&pvrdma_device_list_lock);
	list_del(&dev->device_link);
	mutex_unlock(&pvrdma_device_list_lock);

	pvrdma_disable_intrs(dev);
	pvrdma_free_irq(dev);
	pci_free_irq_vectors(pdev);

	/* Clean up resources */
	pvrdma_write_reg(dev, PVRDMA_REG_CTL, PVRDMA_DEVICE_CTL_RESET);
	pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
	pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
	pvrdma_free_slots(dev);
	dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr,
			  dev->dsrbase);

	iounmap(dev->regs);
	kfree(dev->sgid_tbl);
	kfree(dev->cq_tbl);
	kfree(dev->srq_tbl);
	kfree(dev->qp_tbl);
	pvrdma_uar_table_cleanup(dev);
	iounmap(dev->driver_uar.map);

	ib_dealloc_device(&dev->ib_dev);

	/* Free pci resources */
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static const struct pci_device_id pvrdma_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_PVRDMA), },
	{ 0 },
};

MODULE_DEVICE_TABLE(pci, pvrdma_pci_table);

static struct pci_driver pvrdma_driver = {
	.name		= DRV_NAME,
	.id_table	= pvrdma_pci_table,
	.probe		= pvrdma_pci_probe,
	.remove		= pvrdma_pci_remove,
};

static int __init pvrdma_init(void)
{
	int err;

	event_wq = alloc_ordered_workqueue("pvrdma_event_wq", WQ_MEM_RECLAIM);
	if (!event_wq)
		return -ENOMEM;

	err = pci_register_driver(&pvrdma_driver);
	if (err)
		destroy_workqueue(event_wq);

	return err;
}

static void __exit pvrdma_cleanup(void)
{
	pci_unregister_driver(&pvrdma_driver);

	destroy_workqueue(event_wq);
}

module_init(pvrdma_init);
module_exit(pvrdma_cleanup);

MODULE_AUTHOR("VMware, Inc");
MODULE_DESCRIPTION("VMware Paravirtual RDMA driver");
MODULE_LICENSE("Dual BSD/GPL");