/*
 * RDMA Verbs Transport Library (rdmavt).
 *
 * This file is provided under a dual BSD/GPLv2 license; see MODULE_LICENSE()
 * below.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include "vt.h"
#include "trace.h"

#define RVT_UVERBS_ABI_VERSION 2

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("RDMA Verbs Transport Library");
static int rvt_init(void)
{
	/*
	 * rdmavt has nothing to do at load time; it simply waits for drivers
	 * to register with it.
	 */
	return 0;
}
module_init(rvt_init);

static void rvt_cleanup(void)
{
	/*
	 * Nothing to tear down at exit time either; the module cannot be
	 * removed while registered drivers still reference it.
	 */
}
module_exit(rvt_cleanup);
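
/**
 * rvt_alloc_device - allocate an rvt_dev_info structure
 * @size: how large a structure to allocate (the driver's device structure,
 *        which embeds struct rvt_dev_info)
 * @nports: number of port array slots to allocate
 *
 * Uses ib_alloc_device() so the returned structure embeds a struct ib_device.
 * The driver fills in the rest of struct rvt_dev_info before registration.
 *
 * Return: pointer to the allocated structure, or NULL on allocation failure.
 */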
struct rvt_dev_info *rvt_alloc_device(size_t size, int nports)
{
	struct rvt_dev_info *rdi;

	rdi = (struct rvt_dev_info *)ib_alloc_device(size);
	if (!rdi)
		return rdi;

	rdi->ports = kcalloc(nports,
			     sizeof(struct rvt_ibport *),
			     GFP_KERNEL);
	if (!rdi->ports) {
		ib_dealloc_device(&rdi->ibdev);
		return NULL;
	}

	return rdi;
}
EXPORT_SYMBOL(rvt_alloc_device);
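
/**
 * rvt_dealloc_device - free an rvt_dev_info structure
 * @rdi: structure to free
 *
 * Frees the port array and the ib_device allocated by rvt_alloc_device().
 */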
void rvt_dealloc_device(struct rvt_dev_info *rdi)
{
	kfree(rdi->ports);
	ib_dealloc_device(&rdi->ibdev);
}
EXPORT_SYMBOL(rvt_dealloc_device);

static int rvt_query_device(struct ib_device *ibdev,
			    struct ib_device_attr *props,
			    struct ib_udata *uhw)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	/* Return the device properties the driver filled in at registration */
	*props = rdi->dparms.props;
	return 0;
}

static int rvt_modify_device(struct ib_device *device,
			     int device_modify_mask,
			     struct ib_device_modify *device_modify)
{
	/*
	 * rdmavt does not implement modify device; a driver that needs it
	 * must supply its own implementation.
	 */
	return -EOPNOTSUPP;
}
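
/**
 * rvt_query_port - fill in port attributes
 * @ibdev: the IB device being queried
 * @port_num: 1-based port number from the IB core
 * @props: attribute structure to fill in
 *
 * Return: 0 on success, negative errno on failure.
 */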
static int rvt_query_port(struct ib_device *ibdev, u8 port_num,
			  struct ib_port_attr *props)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
	struct rvt_ibport *rvp;
	int port_index = ibport_num_to_idx(ibdev, port_num);

	if (port_index < 0)
		return -EINVAL;

	rvp = rdi->ports[port_index];

	props->sm_lid = rvp->sm_lid;
	props->sm_sl = rvp->sm_sl;
	props->port_cap_flags = rvp->port_cap_flags;
	props->max_msg_sz = 0x80000000;
	props->pkey_tbl_len = rvt_get_npkeys(rdi);
	props->bad_pkey_cntr = rvp->pkey_violations;
	props->qkey_viol_cntr = rvp->qkey_violations;
	props->subnet_timeout = rvp->subnet_timeout;
	props->init_type_reply = 0;

	/* The driver fills in the remaining attributes, including port state */
	return rdi->driver_f.query_port_state(rdi, port_num, props);
}
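
/**
 * rvt_modify_port - modify port attributes
 * @ibdev: the IB device being modified
 * @port_num: 1-based port number from the IB core
 * @port_modify_mask: which attributes to change
 * @props: new values for the changed attributes
 *
 * Return: 0 on success, negative errno on failure.
 */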
static int rvt_modify_port(struct ib_device *ibdev, u8 port_num,
			   int port_modify_mask, struct ib_port_modify *props)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
	struct rvt_ibport *rvp;
	int ret = 0;
	int port_index = ibport_num_to_idx(ibdev, port_num);

	if (port_index < 0)
		return -EINVAL;

	rvp = rdi->ports[port_index];
	rvp->port_cap_flags |= props->set_port_cap_mask;
	rvp->port_cap_flags &= ~props->clr_port_cap_mask;

	if (props->set_port_cap_mask || props->clr_port_cap_mask)
		rdi->driver_f.cap_mask_chg(rdi, port_num);
	if (port_modify_mask & IB_PORT_SHUTDOWN)
		ret = rdi->driver_f.shut_down_port(rdi, port_num);
	if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
		rvp->qkey_violations = 0;

	return ret;
}
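
/**
 * rvt_query_pkey - return a pkey from the table at the given index
 * @ibdev: the IB device being queried
 * @port_num: 1-based port number from the IB core
 * @index: index into the pkey table
 * @pkey: returned pkey
 *
 * Return: 0 on success, -EINVAL if the port or index is out of range.
 */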
static int rvt_query_pkey(struct ib_device *ibdev, u8 port_num, u16 index,
			  u16 *pkey)
{
	/*
	 * The driver keeps the per-port pkey table up to date; simply return
	 * the current entry at @index.
	 */
	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
	int port_index;

	port_index = ibport_num_to_idx(ibdev, port_num);
	if (port_index < 0)
		return -EINVAL;

	if (index >= rvt_get_npkeys(rdi))
		return -EINVAL;

	*pkey = rvt_get_pkey(rdi, port_index, index);
	return 0;
}
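
/**
 * rvt_query_gid - return a GID for the given port and index
 * @ibdev: the IB device being queried
 * @port_num: 1-based port number from the IB core
 * @guid_index: index into the GUID table
 * @gid: returned GID
 *
 * Return: 0 on success, negative errno on failure.
 */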
static int rvt_query_gid(struct ib_device *ibdev, u8 port_num,
			 int guid_index, union ib_gid *gid)
{
	struct rvt_dev_info *rdi;
	struct rvt_ibport *rvp;
	int port_index;

	/*
	 * The subnet prefix comes from the rvt port; the interface ID (GUID)
	 * comes from the driver, which owns the GUID table.
	 */
	port_index = ibport_num_to_idx(ibdev, port_num);
	if (port_index < 0)
		return -EINVAL;

	rdi = ib_to_rvt(ibdev);
	rvp = rdi->ports[port_index];

	gid->global.subnet_prefix = rvp->gid_prefix;

	return rdi->driver_f.get_guid_be(rdi, rvp, guid_index,
					 &gid->global.interface_id);
}

struct rvt_ucontext {
	struct ib_ucontext ibucontext;
};

static inline struct rvt_ucontext *to_iucontext(struct ib_ucontext
						*ibucontext)
{
	return container_of(ibucontext, struct rvt_ucontext, ibucontext);
}
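
/**
 * rvt_alloc_ucontext - allocate a user context
 * @ibdev: the IB device the context is for
 * @udata: user data passed in from userspace (unused)
 *
 * Return: pointer to the embedded ib_ucontext, or an ERR_PTR on failure.
 */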
static struct ib_ucontext *rvt_alloc_ucontext(struct ib_device *ibdev,
					      struct ib_udata *udata)
{
	struct rvt_ucontext *context;

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);
	return &context->ibucontext;
}
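
/**
 * rvt_dealloc_ucontext - free a user context
 * @context: the context to free
 *
 * Return: always 0.
 */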
static int rvt_dealloc_ucontext(struct ib_ucontext *context)
{
	kfree(to_iucontext(context));
	return 0;
}

static int rvt_get_port_immutable(struct ib_device *ibdev, u8 port_num,
				  struct ib_port_immutable *immutable)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
	struct ib_port_attr attr;
	int err, port_index;

	port_index = ibport_num_to_idx(ibdev, port_num);
	if (port_index < 0)
		return -EINVAL;

	immutable->core_cap_flags = rdi->dparms.core_cap_flags;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = rdi->dparms.max_mad_size;

	return 0;
}

enum {
	MISC,
	QUERY_DEVICE,
	MODIFY_DEVICE,
	QUERY_PORT,
	MODIFY_PORT,
	QUERY_PKEY,
	QUERY_GID,
	ALLOC_UCONTEXT,
	DEALLOC_UCONTEXT,
	GET_PORT_IMMUTABLE,
	CREATE_QP,
	MODIFY_QP,
	DESTROY_QP,
	QUERY_QP,
	POST_SEND,
	POST_RECV,
	POST_SRQ_RECV,
	CREATE_AH,
	DESTROY_AH,
	MODIFY_AH,
	QUERY_AH,
	CREATE_SRQ,
	MODIFY_SRQ,
	DESTROY_SRQ,
	QUERY_SRQ,
	ATTACH_MCAST,
	DETACH_MCAST,
	GET_DMA_MR,
	REG_USER_MR,
	DEREG_MR,
	ALLOC_MR,
	MAP_MR_SG,
	ALLOC_FMR,
	MAP_PHYS_FMR,
	UNMAP_FMR,
	DEALLOC_FMR,
	MMAP,
	CREATE_CQ,
	DESTROY_CQ,
	POLL_CQ,
	REQ_NOTIFY_CQ,
	RESIZE_CQ,
	ALLOC_PD,
	DEALLOC_PD,
	_VERB_IDX_MAX
};

/*
 * If the driver did not supply its own verb at @offset in struct ib_device,
 * install the rdmavt default @func there and return 0; otherwise leave the
 * driver's override in place and return 1.
 */
static inline int check_driver_override(struct rvt_dev_info *rdi,
					size_t offset, void *func)
{
	if (!*(void **)((void *)&rdi->ibdev + offset)) {
		*(void **)((void *)&rdi->ibdev + offset) = func;
		return 0;
	}

	return 1;
}

static noinline int check_support(struct rvt_dev_info *rdi, int verb)
{
	switch (verb) {
	case MISC:
		/*
		 * These functions are not verbs as such, but rdmavt cannot
		 * operate without them.
		 */
		if ((!rdi->driver_f.port_callback) ||
		    (!rdi->driver_f.get_card_name) ||
		    (!rdi->driver_f.get_pci_dev))
			return -EINVAL;
		break;

	case QUERY_DEVICE:
		check_driver_override(rdi, offsetof(struct ib_device,
						    query_device),
				      rvt_query_device);
		break;

	case MODIFY_DEVICE:
		/*
		 * rdmavt does not support modify device; the driver must
		 * override this verb if it wants the functionality.
		 */
		if (!check_driver_override(rdi, offsetof(struct ib_device,
							 modify_device),
					   rvt_modify_device))
			return -EOPNOTSUPP;
		break;

	case QUERY_PORT:
		if (!check_driver_override(rdi, offsetof(struct ib_device,
							 query_port),
					   rvt_query_port))
			if (!rdi->driver_f.query_port_state)
				return -EINVAL;
		break;

	case MODIFY_PORT:
		if (!check_driver_override(rdi, offsetof(struct ib_device,
							 modify_port),
					   rvt_modify_port))
			if (!rdi->driver_f.cap_mask_chg ||
			    !rdi->driver_f.shut_down_port)
				return -EINVAL;
		break;

	case QUERY_PKEY:
		check_driver_override(rdi, offsetof(struct ib_device,
						    query_pkey),
				      rvt_query_pkey);
		break;

	case QUERY_GID:
		if (!check_driver_override(rdi, offsetof(struct ib_device,
							 query_gid),
					   rvt_query_gid))
			if (!rdi->driver_f.get_guid_be)
				return -EINVAL;
		break;

	case ALLOC_UCONTEXT:
		check_driver_override(rdi, offsetof(struct ib_device,
						    alloc_ucontext),
				      rvt_alloc_ucontext);
		break;

	case DEALLOC_UCONTEXT:
		check_driver_override(rdi, offsetof(struct ib_device,
						    dealloc_ucontext),
				      rvt_dealloc_ucontext);
		break;

	case GET_PORT_IMMUTABLE:
		check_driver_override(rdi, offsetof(struct ib_device,
						    get_port_immutable),
				      rvt_get_port_immutable);
		break;

	case CREATE_QP:
		if (!check_driver_override(rdi, offsetof(struct ib_device,
							 create_qp),
					   rvt_create_qp))
			if (!rdi->driver_f.qp_priv_alloc ||
			    !rdi->driver_f.qp_priv_free ||
			    !rdi->driver_f.notify_qp_reset ||
			    !rdi->driver_f.flush_qp_waiters ||
			    !rdi->driver_f.stop_send_queue ||
			    !rdi->driver_f.quiesce_qp)
				return -EINVAL;
		break;

	case MODIFY_QP:
		if (!check_driver_override(rdi, offsetof(struct ib_device,
							 modify_qp),
					   rvt_modify_qp))
			if (!rdi->driver_f.notify_qp_reset ||
			    !rdi->driver_f.schedule_send ||
			    !rdi->driver_f.get_pmtu_from_attr ||
			    !rdi->driver_f.flush_qp_waiters ||
			    !rdi->driver_f.stop_send_queue ||
			    !rdi->driver_f.quiesce_qp ||
			    !rdi->driver_f.notify_error_qp ||
			    !rdi->driver_f.mtu_from_qp ||
			    !rdi->driver_f.mtu_to_path_mtu)
				return -EINVAL;
		break;

	case DESTROY_QP:
		if (!check_driver_override(rdi, offsetof(struct ib_device,
							 destroy_qp),
					   rvt_destroy_qp))
			if (!rdi->driver_f.qp_priv_free ||
			    !rdi->driver_f.notify_qp_reset ||
			    !rdi->driver_f.flush_qp_waiters ||
			    !rdi->driver_f.stop_send_queue ||
			    !rdi->driver_f.quiesce_qp)
				return -EINVAL;
		break;

	case QUERY_QP:
		check_driver_override(rdi, offsetof(struct ib_device,
						    query_qp),
				      rvt_query_qp);
		break;

	case POST_SEND:
		if (!check_driver_override(rdi, offsetof(struct ib_device,
							 post_send),
					   rvt_post_send))
			if (!rdi->driver_f.schedule_send ||
			    !rdi->driver_f.do_send ||
			    !rdi->post_parms)
				return -EINVAL;
		break;

	case POST_RECV:
		check_driver_override(rdi, offsetof(struct ib_device,
						    post_recv),
				      rvt_post_recv);
		break;

	case POST_SRQ_RECV:
		check_driver_override(rdi, offsetof(struct ib_device,
						    post_srq_recv),
				      rvt_post_srq_recv);
		break;

	case CREATE_AH:
		check_driver_override(rdi, offsetof(struct ib_device,
						    create_ah),
				      rvt_create_ah);
		break;

	case DESTROY_AH:
		check_driver_override(rdi, offsetof(struct ib_device,
						    destroy_ah),
				      rvt_destroy_ah);
		break;

	case MODIFY_AH:
		check_driver_override(rdi, offsetof(struct ib_device,
						    modify_ah),
				      rvt_modify_ah);
		break;

	case QUERY_AH:
		check_driver_override(rdi, offsetof(struct ib_device,
						    query_ah),
				      rvt_query_ah);
		break;

	case CREATE_SRQ:
		check_driver_override(rdi, offsetof(struct ib_device,
						    create_srq),
				      rvt_create_srq);
		break;

	case MODIFY_SRQ:
		check_driver_override(rdi, offsetof(struct ib_device,
						    modify_srq),
				      rvt_modify_srq);
		break;

	case DESTROY_SRQ:
		check_driver_override(rdi, offsetof(struct ib_device,
						    destroy_srq),
				      rvt_destroy_srq);
		break;

	case QUERY_SRQ:
		check_driver_override(rdi, offsetof(struct ib_device,
						    query_srq),
				      rvt_query_srq);
		break;

	case ATTACH_MCAST:
		check_driver_override(rdi, offsetof(struct ib_device,
						    attach_mcast),
				      rvt_attach_mcast);
		break;

	case DETACH_MCAST:
		check_driver_override(rdi, offsetof(struct ib_device,
						    detach_mcast),
				      rvt_detach_mcast);
		break;

	case GET_DMA_MR:
		check_driver_override(rdi, offsetof(struct ib_device,
						    get_dma_mr),
				      rvt_get_dma_mr);
		break;

	case REG_USER_MR:
		check_driver_override(rdi, offsetof(struct ib_device,
						    reg_user_mr),
				      rvt_reg_user_mr);
		break;

	case DEREG_MR:
		check_driver_override(rdi, offsetof(struct ib_device,
						    dereg_mr),
				      rvt_dereg_mr);
		break;

	case ALLOC_FMR:
		check_driver_override(rdi, offsetof(struct ib_device,
						    alloc_fmr),
				      rvt_alloc_fmr);
		break;

	case ALLOC_MR:
		check_driver_override(rdi, offsetof(struct ib_device,
						    alloc_mr),
				      rvt_alloc_mr);
		break;

	case MAP_MR_SG:
		check_driver_override(rdi, offsetof(struct ib_device,
						    map_mr_sg),
				      rvt_map_mr_sg);
		break;

	case MAP_PHYS_FMR:
		check_driver_override(rdi, offsetof(struct ib_device,
						    map_phys_fmr),
				      rvt_map_phys_fmr);
		break;

	case UNMAP_FMR:
		check_driver_override(rdi, offsetof(struct ib_device,
						    unmap_fmr),
				      rvt_unmap_fmr);
		break;

	case DEALLOC_FMR:
		check_driver_override(rdi, offsetof(struct ib_device,
						    dealloc_fmr),
				      rvt_dealloc_fmr);
		break;

	case MMAP:
		check_driver_override(rdi, offsetof(struct ib_device,
						    mmap),
				      rvt_mmap);
		break;

	case CREATE_CQ:
		check_driver_override(rdi, offsetof(struct ib_device,
						    create_cq),
				      rvt_create_cq);
		break;

	case DESTROY_CQ:
		check_driver_override(rdi, offsetof(struct ib_device,
						    destroy_cq),
				      rvt_destroy_cq);
		break;

	case POLL_CQ:
		check_driver_override(rdi, offsetof(struct ib_device,
						    poll_cq),
				      rvt_poll_cq);
		break;

	case REQ_NOTIFY_CQ:
		check_driver_override(rdi, offsetof(struct ib_device,
						    req_notify_cq),
				      rvt_req_notify_cq);
		break;

	case RESIZE_CQ:
		check_driver_override(rdi, offsetof(struct ib_device,
						    resize_cq),
				      rvt_resize_cq);
		break;

	case ALLOC_PD:
		check_driver_override(rdi, offsetof(struct ib_device,
						    alloc_pd),
				      rvt_alloc_pd);
		break;

	case DEALLOC_PD:
		check_driver_override(rdi, offsetof(struct ib_device,
						    dealloc_pd),
				      rvt_dealloc_pd);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
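
/**
 * rvt_register_device - register a driver-populated rvt_dev_info with rdmavt
 *                       and the IB core
 * @rdi: the driver's device info structure
 *
 * Verifies that the driver supplied everything rdmavt needs, installs rdmavt
 * defaults for the verbs the driver did not override, initializes the rdmavt
 * resource pools, and registers the device with the IB core.
 *
 * Return: 0 on success, negative errno on failure.
 */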
int rvt_register_device(struct rvt_dev_info *rdi)
{
	int ret = 0, i;

	if (!rdi)
		return -EINVAL;

	/*
	 * Make sure the driver provides everything rdmavt needs for each verb
	 * and fill in the rdmavt defaults where the driver did not override.
	 */
	for (i = 0; i < _VERB_IDX_MAX; i++)
		if (check_support(rdi, i)) {
			pr_err("Driver support req not met at %d\n", i);
			return -EINVAL;
		}

	trace_rvt_dbg(rdi, "Driver attempting registration");
	rvt_mmap_init(rdi);

	/* Queue Pairs */
	ret = rvt_driver_qp_init(rdi);
	if (ret) {
		pr_err("Error in driver QP init.\n");
		return -EINVAL;
	}

	/* Address Handles */
	spin_lock_init(&rdi->n_ahs_lock);
	rdi->n_ahs_allocated = 0;

	/* Shared Receive Queues */
	rvt_driver_srq_init(rdi);

	/* Multicast */
	rvt_driver_mcast_init(rdi);

	/* Memory Regions */
	ret = rvt_driver_mr_init(rdi);
	if (ret) {
		pr_err("Error in driver MR init.\n");
		goto bail_no_mr;
	}

	/* Completion Queues */
	ret = rvt_driver_cq_init(rdi);
	if (ret) {
		pr_err("Error in driver CQ init.\n");
		goto bail_mr;
	}

	/* DMA Operations */
	rdi->ibdev.dev.dma_ops = rdi->ibdev.dev.dma_ops ? : &dma_virt_ops;

	/* Protection Domains */
	spin_lock_init(&rdi->n_pds_lock);
	rdi->n_pds_allocated = 0;

	/*
	 * The uverbs ABI version and command mask are owned by rdmavt, not by
	 * the underlying drivers, since rdmavt implements these verbs.
	 */
	rdi->ibdev.uverbs_abi_ver = RVT_UVERBS_ABI_VERSION;
	rdi->ibdev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_CREATE_AH) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_AH) |
		(1ull << IB_USER_VERBS_CMD_QUERY_AH) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_POLL_CQ) |
		(1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_POST_SEND) |
		(1ull << IB_USER_VERBS_CMD_POST_RECV) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
	rdi->ibdev.node_type = RDMA_NODE_IB_CA;
	rdi->ibdev.num_comp_vectors = 1;

	/* We are now good to announce we exist */
	ret = ib_register_device(&rdi->ibdev, rdi->driver_f.port_callback);
	if (ret) {
		rvt_pr_err(rdi, "Failed to register driver with ib core.\n");
		goto bail_cq;
	}

	rvt_create_mad_agents(rdi);

	rvt_pr_info(rdi, "Registration with rdmavt done.\n");
	return ret;

bail_cq:
	rvt_cq_exit(rdi);

bail_mr:
	rvt_mr_exit(rdi);

bail_no_mr:
	rvt_qp_exit(rdi);

	return ret;
}
EXPORT_SYMBOL(rvt_register_device);
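
/**
 * rvt_unregister_device - remove a driver from rdmavt and the IB core
 * @rdi: the device info structure to unregister
 *
 * Tears down everything set up by rvt_register_device().
 */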
void rvt_unregister_device(struct rvt_dev_info *rdi)
{
	if (!rdi)
		return;

	trace_rvt_dbg(rdi, "Driver is unregistering.");

	rvt_free_mad_agents(rdi);

	ib_unregister_device(&rdi->ibdev);
	rvt_cq_exit(rdi);
	rvt_mr_exit(rdi);
	rvt_qp_exit(rdi);
}
EXPORT_SYMBOL(rvt_unregister_device);
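
/**
 * rvt_init_port - attach a driver-owned rvt_ibport to the device
 * @rdi: the device info structure
 * @port: the driver-allocated rvt_ibport to attach
 * @port_index: 0-based index into rdi->ports
 * @pkey_table: driver-maintained pkey table for this port
 *
 * The driver calls this for each port to hook its per-port state into the
 * rvt_dev_info port array.
 *
 * Return: always 0.
 */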
int rvt_init_port(struct rvt_dev_info *rdi, struct rvt_ibport *port,
		  int port_index, u16 *pkey_table)
{
	rdi->ports[port_index] = port;
	rdi->ports[port_index]->pkey_table = pkey_table;

	return 0;
}
EXPORT_SYMBOL(rvt_init_port);