#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <linux/mm.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_user.h"
#include "mthca_memfree.h"

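/*
 * All of the query methods below work the same way: build a subnet
 * management packet (SMP), send it to the HCA's own subnet management
 * agent with the MAD_IFC firmware command, and decode the reply.
 */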
static void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method        = IB_MGMT_METHOD_GET;
}

static int mthca_query_device(struct ib_device *ibdev,
			      struct ib_device_attr *props)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	struct mthca_dev *mdev = to_mdev(ibdev);
	int err = -ENOMEM;
	u8 status;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(props, 0, sizeof *props);

	props->fw_ver = mdev->fw_ver;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mthca_MAD_IFC(mdev, 1, 1,
			    1, NULL, NULL, in_mad, out_mad,
			    &status);
	if (err)
		goto out;
	if (status) {
		err = -EINVAL;
		goto out;
	}

	props->device_cap_flags    = mdev->device_cap_flags;
	props->vendor_id           = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id      = be16_to_cpup((__be16 *) (out_mad->data + 30));
	props->hw_ver              = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

	props->max_mr_size         = ~0ull;
	props->page_size_cap       = mdev->limits.page_size_cap;
	props->max_qp              = mdev->limits.num_qps - mdev->limits.reserved_qps;
	props->max_qp_wr           = mdev->limits.max_wqes;
	props->max_sge             = mdev->limits.max_sg;
	props->max_cq              = mdev->limits.num_cqs - mdev->limits.reserved_cqs;
	props->max_cqe             = mdev->limits.max_cqes;
	props->max_mr              = mdev->limits.num_mpts - mdev->limits.reserved_mrws;
	props->max_pd              = mdev->limits.num_pds - mdev->limits.reserved_pds;
	props->max_qp_rd_atom      = 1 << mdev->qp_table.rdb_shift;
	props->max_qp_init_rd_atom = mdev->limits.max_qp_init_rdma;
	props->max_res_rd_atom     = props->max_qp_rd_atom * props->max_qp;
	props->max_srq             = mdev->limits.num_srqs - mdev->limits.reserved_srqs;
	props->max_srq_wr          = mdev->limits.max_srq_wqes;
	props->max_srq_sge         = mdev->limits.max_srq_sge;
	props->local_ca_ack_delay  = mdev->limits.local_ca_ack_delay;
	props->atomic_cap          = mdev->limits.flags & DEV_LIM_FLAG_ATOMIC ?
					IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->max_pkeys           = mdev->limits.pkey_table_len;
	props->max_mcast_grp       = mdev->limits.num_mgms + mdev->limits.num_amgms;
	props->max_mcast_qp_attach = MTHCA_QP_PER_MGM;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	/*
	 * If Sinai memory key optimization is being used, then only
	 * the 8-bit key portion will change.  For other HCAs, the
	 * unused index bits will also be used for FMR remapping.
	 */
	if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
		props->max_map_per_fmr = 255;
	else
		props->max_map_per_fmr =
			(1 << (32 - ilog2(mdev->limits.num_mpts))) - 1;

	err = 0;
 out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mthca_query_port(struct ib_device *ibdev,
			    u8 port, struct ib_port_attr *props)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	u8 status;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(props, 0, sizeof *props);

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad,
			    &status);
	if (err)
		goto out;
	if (status) {
		err = -EINVAL;
		goto out;
	}

	props->lid             = be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc             = out_mad->data[34] & 0x7;
	props->sm_lid          = be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl           = out_mad->data[36] & 0xf;
	props->state           = out_mad->data[32] & 0xf;
	props->phys_state      = out_mad->data[33] >> 4;
	props->port_cap_flags  = be32_to_cpup((__be32 *) (out_mad->data + 20));
	props->gid_tbl_len     = to_mdev(ibdev)->limits.gid_table_len;
	props->max_msg_sz      = 0x80000000;
	props->pkey_tbl_len    = to_mdev(ibdev)->limits.pkey_table_len;
	props->bad_pkey_cntr   = be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr  = be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width    = out_mad->data[31] & 0xf;
	props->active_speed    = out_mad->data[35] >> 4;
	props->max_mtu         = out_mad->data[41] & 0xf;
	props->active_mtu      = out_mad->data[36] >> 4;
	props->subnet_timeout  = out_mad->data[51] & 0x1f;
	props->max_vl_num      = out_mad->data[37] >> 4;
	props->init_type_reply = out_mad->data[41] >> 4;

 out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mthca_modify_device(struct ib_device *ibdev,
			       int mask,
			       struct ib_device_modify *props)
{
	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
			return -ERESTARTSYS;
		memcpy(ibdev->node_desc, props->node_desc, 64);
		mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	}

	return 0;
}

static int mthca_modify_port(struct ib_device *ibdev,
			     u8 port, int port_modify_mask,
			     struct ib_port_modify *props)
{
	struct mthca_set_ib_param set_ib;
	struct ib_port_attr attr;
	int err;
	u8 status;

	if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
		return -ERESTARTSYS;

	err = mthca_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	set_ib.set_si_guid     = 0;
	set_ib.reset_qkey_viol = !!(port_modify_mask & IB_PORT_RESET_QKEY_CNTR);

	set_ib.cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port, &status);
	if (err)
		goto out;
	if (status) {
		err = -EINVAL;
		goto out;
	}

out:
	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}

static int mthca_query_pkey(struct ib_device *ibdev,
			    u8 port, u16 index, u16 *pkey)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	u8 status;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad,
			    &status);
	if (err)
		goto out;
	if (status) {
		err = -EINVAL;
		goto out;
	}

	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

 out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mthca_query_gid(struct ib_device *ibdev, u8 port,
			   int index, union ib_gid *gid)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	u8 status;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad,
			    &status);
	if (err)
		goto out;
	if (status) {
		err = -EINVAL;
		goto out;
	}

	memcpy(gid->raw, out_mad->data + 8, 8);

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad,
			    &status);
	if (err)
		goto out;
	if (status) {
		err = -EINVAL;
		goto out;
	}

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

 out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

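/*
 * A userspace context owns one user access region (UAR) for ringing
 * doorbells and, on memfree HCAs, a table of the doorbell pages the
 * process has mapped.  qp_tab_size and uarc_size are reported back so
 * that userspace can size its own structures to match.
 */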
static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
						struct ib_udata *udata)
{
	struct mthca_alloc_ucontext_resp uresp;
	struct mthca_ucontext *context;
	int err;

	memset(&uresp, 0, sizeof uresp);

	uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;
	if (mthca_is_memfree(to_mdev(ibdev)))
		uresp.uarc_size = to_mdev(ibdev)->uar_table.uarc_size;
	else
		uresp.uarc_size = 0;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	err = mthca_uar_alloc(to_mdev(ibdev), &context->uar);
	if (err) {
		kfree(context);
		return ERR_PTR(err);
	}

	context->db_tab = mthca_init_user_db_tab(to_mdev(ibdev));
	if (IS_ERR(context->db_tab)) {
		err = PTR_ERR(context->db_tab);
		mthca_uar_free(to_mdev(ibdev), &context->uar);
		kfree(context);
		return ERR_PTR(err);
	}

	if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) {
		mthca_cleanup_user_db_tab(to_mdev(ibdev), &context->uar, context->db_tab);
		mthca_uar_free(to_mdev(ibdev), &context->uar);
		kfree(context);
		return ERR_PTR(-EFAULT);
	}

	return &context->ibucontext;
}

static int mthca_dealloc_ucontext(struct ib_ucontext *context)
{
	mthca_cleanup_user_db_tab(to_mdev(context->device), &to_mucontext(context)->uar,
				  to_mucontext(context)->db_tab);
	mthca_uar_free(to_mdev(context->device), &to_mucontext(context)->uar);
	kfree(to_mucontext(context));

	return 0;
}

static int mthca_mmap_uar(struct ib_ucontext *context,
			  struct vm_area_struct *vma)
{
	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (io_remap_pfn_range(vma, vma->vm_start,
			       to_mucontext(context)->uar.pfn,
			       PAGE_SIZE, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}

static struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev,
				    struct ib_ucontext *context,
				    struct ib_udata *udata)
{
	struct mthca_pd *pd;
	int err;

	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mthca_pd_alloc(to_mdev(ibdev), !context, pd);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context) {
		if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) {
			mthca_pd_free(to_mdev(ibdev), pd);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}
	}

	return &pd->ibpd;
}

static int mthca_dealloc_pd(struct ib_pd *pd)
{
	mthca_pd_free(to_mdev(pd->device), to_mpd(pd));
	kfree(pd);

	return 0;
}

static struct ib_ah *mthca_ah_create(struct ib_pd *pd,
				     struct ib_ah_attr *ah_attr)
{
	int err;
	struct mthca_ah *ah;

	ah = kmalloc(sizeof *ah, GFP_ATOMIC);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	err = mthca_create_ah(to_mdev(pd->device), to_mpd(pd), ah_attr, ah);
	if (err) {
		kfree(ah);
		return ERR_PTR(err);
	}

	return &ah->ibah;
}

static int mthca_ah_destroy(struct ib_ah *ah)
{
	mthca_destroy_ah(to_mdev(ah->device), to_mah(ah));
	kfree(ah);

	return 0;
}

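/*
 * For userspace SRQs the consumer allocates the buffer and doorbell
 * record itself and passes in their lkey and doorbell index; the
 * kernel only maps the doorbell page and programs the hardware.
 */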
static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
				       struct ib_srq_init_attr *init_attr,
				       struct ib_udata *udata)
{
	struct mthca_create_srq ucmd;
	struct mthca_ucontext *context = NULL;
	struct mthca_srq *srq;
	int err;

	srq = kmalloc(sizeof *srq, GFP_KERNEL);
	if (!srq)
		return ERR_PTR(-ENOMEM);

	if (pd->uobject) {
		context = to_mucontext(pd->uobject->context);

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_free;
		}

		err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
					context->db_tab, ucmd.db_index,
					ucmd.db_page);

		if (err)
			goto err_free;

		srq->mr.ibmr.lkey = ucmd.lkey;
		srq->db_index     = ucmd.db_index;
	}

	err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd),
			      &init_attr->attr, srq);

	if (err && pd->uobject)
		mthca_unmap_user_db(to_mdev(pd->device), &context->uar,
				    context->db_tab, ucmd.db_index);

	if (err)
		goto err_free;

	if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) {
		mthca_free_srq(to_mdev(pd->device), srq);
		err = -EFAULT;
		goto err_free;
	}

	return &srq->ibsrq;

err_free:
	kfree(srq);

	return ERR_PTR(err);
}

static int mthca_destroy_srq(struct ib_srq *srq)
{
	struct mthca_ucontext *context;

	if (srq->uobject) {
		context = to_mucontext(srq->uobject->context);

		mthca_unmap_user_db(to_mdev(srq->device), &context->uar,
				    context->db_tab, to_msrq(srq)->db_index);
	}

	mthca_free_srq(to_mdev(srq->device), to_msrq(srq));
	kfree(srq);

	return 0;
}

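/*
 * Userspace QPs carry two doorbell records, one for the send queue
 * and one for the receive queue.  Both must be mapped before the QP
 * is created and unmapped again if creation fails.
 */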
static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata)
{
	struct mthca_create_qp ucmd;
	struct mthca_qp *qp;
	int err;

	switch (init_attr->qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	{
		struct mthca_ucontext *context;

		qp = kmalloc(sizeof *qp, GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		if (pd->uobject) {
			context = to_mucontext(pd->uobject->context);

			if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
				kfree(qp);
				return ERR_PTR(-EFAULT);
			}

			err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
						context->db_tab,
						ucmd.sq_db_index, ucmd.sq_db_page);
			if (err) {
				kfree(qp);
				return ERR_PTR(err);
			}

			err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
						context->db_tab,
						ucmd.rq_db_index, ucmd.rq_db_page);
			if (err) {
				mthca_unmap_user_db(to_mdev(pd->device),
						    &context->uar,
						    context->db_tab,
						    ucmd.sq_db_index);
				kfree(qp);
				return ERR_PTR(err);
			}

			qp->mr.ibmr.lkey = ucmd.lkey;
			qp->sq.db_index  = ucmd.sq_db_index;
			qp->rq.db_index  = ucmd.rq_db_index;
		}

		err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd),
				     to_mcq(init_attr->send_cq),
				     to_mcq(init_attr->recv_cq),
				     init_attr->qp_type, init_attr->sq_sig_type,
				     &init_attr->cap, qp);

		if (err && pd->uobject) {
			context = to_mucontext(pd->uobject->context);

			mthca_unmap_user_db(to_mdev(pd->device),
					    &context->uar,
					    context->db_tab,
					    ucmd.sq_db_index);
			mthca_unmap_user_db(to_mdev(pd->device),
					    &context->uar,
					    context->db_tab,
					    ucmd.rq_db_index);
		}

		qp->ibqp.qp_num = qp->qpn;
		break;
	}
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	{
		/* Don't allow userspace to create special QPs */
		if (pd->uobject)
			return ERR_PTR(-EINVAL);

		qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;

		err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd),
				      to_mcq(init_attr->send_cq),
				      to_mcq(init_attr->recv_cq),
				      init_attr->sq_sig_type, &init_attr->cap,
				      qp->ibqp.qp_num, init_attr->port_num,
				      to_msqp(qp));
		break;
	}
	default:
		/* Don't support raw QPs */
		return ERR_PTR(-ENOSYS);
	}

	if (err) {
		kfree(qp);
		return ERR_PTR(err);
	}

	init_attr->cap.max_send_wr     = qp->sq.max;
	init_attr->cap.max_recv_wr     = qp->rq.max;
	init_attr->cap.max_send_sge    = qp->sq.max_gs;
	init_attr->cap.max_recv_sge    = qp->rq.max_gs;
	init_attr->cap.max_inline_data = qp->max_inline_data;

	return &qp->ibqp;
}

static int mthca_destroy_qp(struct ib_qp *qp)
{
	if (qp->uobject) {
		mthca_unmap_user_db(to_mdev(qp->device),
				    &to_mucontext(qp->uobject->context)->uar,
				    to_mucontext(qp->uobject->context)->db_tab,
				    to_mqp(qp)->sq.db_index);
		mthca_unmap_user_db(to_mdev(qp->device),
				    &to_mucontext(qp->uobject->context)->uar,
				    to_mucontext(qp->uobject->context)->db_tab,
				    to_mqp(qp)->rq.db_index);
	}
	mthca_free_qp(to_mdev(qp->device), to_mqp(qp));
	kfree(qp);
	return 0;
}

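/*
 * Userspace CQs likewise use two doorbell records: one to update the
 * consumer index after polling and one to rearm the CQ.  Both are
 * mapped before the CQ is created and unmapped in reverse order on
 * any failure path below.
 */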
static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,
				     int comp_vector,
				     struct ib_ucontext *context,
				     struct ib_udata *udata)
{
	struct mthca_create_cq ucmd;
	struct mthca_cq *cq;
	int nent;
	int err;

	if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes)
		return ERR_PTR(-EINVAL);

	if (context) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
			return ERR_PTR(-EFAULT);

		err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
					to_mucontext(context)->db_tab,
					ucmd.set_db_index, ucmd.set_db_page);
		if (err)
			return ERR_PTR(err);

		err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
					to_mucontext(context)->db_tab,
					ucmd.arm_db_index, ucmd.arm_db_page);
		if (err)
			goto err_unmap_set;
	}

	cq = kmalloc(sizeof *cq, GFP_KERNEL);
	if (!cq) {
		err = -ENOMEM;
		goto err_unmap_arm;
	}

	if (context) {
		cq->buf.mr.ibmr.lkey = ucmd.lkey;
		cq->set_ci_db_index  = ucmd.set_db_index;
		cq->arm_db_index     = ucmd.arm_db_index;
	}

	/* The hardware requires a power-of-two number of entries */
	for (nent = 1; nent <= entries; nent <<= 1)
		; /* nothing */

	err = mthca_init_cq(to_mdev(ibdev), nent,
			    context ? to_mucontext(context) : NULL,
			    context ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num,
			    cq);
	if (err)
		goto err_free;

	if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) {
		mthca_free_cq(to_mdev(ibdev), cq);
		err = -EFAULT;
		goto err_free;
	}

	cq->resize_buf = NULL;

	return &cq->ibcq;

err_free:
	kfree(cq);

err_unmap_arm:
	if (context)
		mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
				    to_mucontext(context)->db_tab, ucmd.arm_db_index);

err_unmap_set:
	if (context)
		mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
				    to_mucontext(context)->db_tab, ucmd.set_db_index);

	return ERR_PTR(err);
}

static int mthca_alloc_resize_buf(struct mthca_dev *dev, struct mthca_cq *cq,
				  int entries)
{
	int ret;

	spin_lock_irq(&cq->lock);
	if (cq->resize_buf) {
		ret = -EBUSY;
		goto unlock;
	}

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
	if (!cq->resize_buf) {
		ret = -ENOMEM;
		goto unlock;
	}

	cq->resize_buf->state = CQ_RESIZE_ALLOC;

	ret = 0;

unlock:
	spin_unlock_irq(&cq->lock);

	if (ret)
		return ret;

	ret = mthca_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
	if (ret) {
		spin_lock_irq(&cq->lock);
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		spin_unlock_irq(&cq->lock);
		return ret;
	}

	cq->resize_buf->cqe = entries - 1;

	spin_lock_irq(&cq->lock);
	cq->resize_buf->state = CQ_RESIZE_READY;
	spin_unlock_irq(&cq->lock);

	return 0;
}

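/*
 * Resizing a kernel CQ happens in stages: allocate the new buffer,
 * ask the firmware to switch to it with RESIZE_CQ, then copy any
 * unpolled CQEs across and free the old buffer.  resize_buf->state
 * tracks how far an in-flight resize has progressed.
 */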
static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(ibcq->device);
	struct mthca_cq *cq = to_mcq(ibcq);
	struct mthca_resize_cq ucmd;
	u32 lkey;
	u8 status;
	int ret;

	if (entries < 1 || entries > dev->limits.max_cqes)
		return -EINVAL;

	mutex_lock(&cq->mutex);

	entries = roundup_pow_of_two(entries + 1);
	if (entries == ibcq->cqe + 1) {
		ret = 0;
		goto out;
	}

	if (cq->is_kernel) {
		ret = mthca_alloc_resize_buf(dev, cq, entries);
		if (ret)
			goto out;
		lkey = cq->resize_buf->buf.mr.ibmr.lkey;
	} else {
		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			ret = -EFAULT;
			goto out;
		}
		lkey = ucmd.lkey;
	}

	ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, ilog2(entries), &status);
	if (status)
		ret = -EINVAL;

	if (ret) {
		if (cq->resize_buf) {
			mthca_free_cq_buf(dev, &cq->resize_buf->buf,
					  cq->resize_buf->cqe);
			kfree(cq->resize_buf);
			spin_lock_irq(&cq->lock);
			cq->resize_buf = NULL;
			spin_unlock_irq(&cq->lock);
		}
		goto out;
	}

	if (cq->is_kernel) {
		struct mthca_cq_buf tbuf;
		int tcqe;

		spin_lock_irq(&cq->lock);
		if (cq->resize_buf->state == CQ_RESIZE_READY) {
			mthca_cq_resize_copy_cqes(cq);
			tbuf         = cq->buf;
			tcqe         = cq->ibcq.cqe;
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;
		} else {
			tbuf = cq->resize_buf->buf;
			tcqe = cq->resize_buf->cqe;
		}

		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		spin_unlock_irq(&cq->lock);

		mthca_free_cq_buf(dev, &tbuf, tcqe);
	} else
		ibcq->cqe = entries - 1;

out:
	mutex_unlock(&cq->mutex);

	return ret;
}

static int mthca_destroy_cq(struct ib_cq *cq)
{
	if (cq->uobject) {
		mthca_unmap_user_db(to_mdev(cq->device),
				    &to_mucontext(cq->uobject->context)->uar,
				    to_mucontext(cq->uobject->context)->db_tab,
				    to_mcq(cq)->arm_db_index);
		mthca_unmap_user_db(to_mdev(cq->device),
				    &to_mucontext(cq->uobject->context)->uar,
				    to_mucontext(cq->uobject->context)->db_tab,
				    to_mcq(cq)->set_ci_db_index);
	}
	mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
	kfree(cq);

	return 0;
}

static inline u32 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MTHCA_MPT_FLAG_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MTHCA_MPT_FLAG_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MTHCA_MPT_FLAG_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MTHCA_MPT_FLAG_LOCAL_WRITE  : 0) |
	       MTHCA_MPT_FLAG_LOCAL_READ;
}

static struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mthca_mr *mr;
	int err;

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = mthca_mr_alloc_notrans(to_mdev(pd->device),
				     to_mpd(pd)->pd_num,
				     convert_access(acc), mr);

	if (err) {
		kfree(mr);
		return ERR_PTR(err);
	}

	mr->umem = NULL;

	return &mr->ibmr;
}

static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd,
				       struct ib_phys_buf *buffer_list,
				       int num_phys_buf,
				       int acc,
				       u64 *iova_start)
{
	struct mthca_mr *mr;
	u64 *page_list;
	u64 total_size;
	u64 mask;
	int shift;
	int npages;
	int err;
	int i, j, n;

	/* First check that we have enough alignment */
	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK))
		return ERR_PTR(-EINVAL);

	mask = 0;
	total_size = 0;
	for (i = 0; i < num_phys_buf; ++i) {
		if (i != 0)
			mask |= buffer_list[i].addr;
		if (i != num_phys_buf - 1)
			mask |= buffer_list[i].addr + buffer_list[i].size;

		total_size += buffer_list[i].size;
	}

	if (mask & ~PAGE_MASK)
		return ERR_PTR(-EINVAL);

	/* Find largest page shift we can use to cover buffers */
	for (shift = PAGE_SHIFT; shift < 31; ++shift)
		if (num_phys_buf > 1) {
			if ((1ULL << shift) & mask)
				break;
		} else {
			if (1ULL << shift >=
			    buffer_list[0].size +
			    (buffer_list[0].addr & ((1ULL << shift) - 1)))
				break;
		}

	buffer_list[0].size += buffer_list[0].addr & ((1ULL << shift) - 1);
	buffer_list[0].addr &= ~0ull << shift;

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	npages = 0;
	for (i = 0; i < num_phys_buf; ++i)
		npages += (buffer_list[i].size + (1ULL << shift) - 1) >> shift;

	if (!npages)
		return &mr->ibmr;

	page_list = kmalloc(npages * sizeof *page_list, GFP_KERNEL);
	if (!page_list) {
		kfree(mr);
		return ERR_PTR(-ENOMEM);
	}

	n = 0;
	for (i = 0; i < num_phys_buf; ++i)
		for (j = 0;
		     j < (buffer_list[i].size + (1ULL << shift) - 1) >> shift;
		     ++j)
			page_list[n++] = buffer_list[i].addr + ((u64) j << shift);

	mthca_dbg(to_mdev(pd->device), "Registering memory at %llx (iova %llx) "
		  "in PD %x; shift %d, npages %d.\n",
		  (unsigned long long) buffer_list[0].addr,
		  (unsigned long long) *iova_start,
		  to_mpd(pd)->pd_num,
		  shift, npages);

	err = mthca_mr_alloc_phys(to_mdev(pd->device),
				  to_mpd(pd)->pd_num,
				  page_list, shift, npages,
				  *iova_start, total_size,
				  convert_access(acc), mr);

	if (err) {
		kfree(page_list);
		kfree(mr);
		return ERR_PTR(err);
	}

	kfree(page_list);
	mr->umem = NULL;

	return &mr->ibmr;
}

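/*
 * Registering a userspace region means pinning its pages with
 * ib_umem_get() and then writing their DMA addresses into the HCA's
 * memory translation table (MTT) in chunks, before allocating the
 * MPT entry that points at the MTT.
 */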
static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				       u64 virt, int acc, struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(pd->device);
	struct ib_umem_chunk *chunk;
	struct mthca_mr *mr;
	u64 *pages;
	int shift, n, len;
	int i, j, k;
	int err = 0;
	int write_mtt_size;

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = ib_umem_get(pd->uobject->context, start, length, acc);
	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		goto err;
	}

	shift = ffs(mr->umem->page_size) - 1;

	n = 0;
	list_for_each_entry(chunk, &mr->umem->chunk_list, list)
		n += chunk->nents;

	mr->mtt = mthca_alloc_mtt(dev, n);
	if (IS_ERR(mr->mtt)) {
		err = PTR_ERR(mr->mtt);
		goto err_umem;
	}

	pages = (u64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_mtt;
	}

	i = n = 0;

	write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages));

	list_for_each_entry(chunk, &mr->umem->chunk_list, list)
		for (j = 0; j < chunk->nmap; ++j) {
			len = sg_dma_len(&chunk->page_list[j]) >> shift;
			for (k = 0; k < len; ++k) {
				pages[i++] = sg_dma_address(&chunk->page_list[j]) +
					mr->umem->page_size * k;
				/*
				 * Be friendly to write_mtt and pass it chunks
				 * of appropriate size.
				 */
				if (i == write_mtt_size) {
					err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
					if (err)
						goto mtt_done;
					n += i;
					i = 0;
				}
			}
		}

	if (i)
		err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
mtt_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_mtt;

	err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, shift, virt, length,
			     convert_access(acc), mr);

	if (err)
		goto err_mtt;

	return &mr->ibmr;

err_mtt:
	mthca_free_mtt(dev, mr->mtt);

err_umem:
	ib_umem_release(mr->umem);

err:
	kfree(mr);
	return ERR_PTR(err);
}

static int mthca_dereg_mr(struct ib_mr *mr)
{
	struct mthca_mr *mmr = to_mmr(mr);

	mthca_free_mr(to_mdev(mr->device), mmr);
	if (mmr->umem)
		ib_umem_release(mmr->umem);
	kfree(mmr);

	return 0;
}

static struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
				      struct ib_fmr_attr *fmr_attr)
{
	struct mthca_fmr *fmr;
	int err;

	fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
	if (!fmr)
		return ERR_PTR(-ENOMEM);

	memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr);
	err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num,
			      convert_access(mr_access_flags), fmr);

	if (err) {
		kfree(fmr);
		return ERR_PTR(err);
	}

	return &fmr->ibmr;
}

static int mthca_dealloc_fmr(struct ib_fmr *fmr)
{
	struct mthca_fmr *mfmr = to_mfmr(fmr);
	int err;

	err = mthca_free_fmr(to_mdev(fmr->device), mfmr);
	if (err)
		return err;

	kfree(mfmr);
	return 0;
}

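/*
 * All FMRs on the list must belong to the same device.  Each FMR is
 * unmapped individually, and the change is made visible to the HCA
 * with a single SYNC_TPT command at the end.
 */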
static int mthca_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;
	int err;
	u8 status;
	struct mthca_dev *mdev = NULL;

	list_for_each_entry(fmr, fmr_list, list) {
		if (mdev && to_mdev(fmr->device) != mdev)
			return -EINVAL;
		mdev = to_mdev(fmr->device);
	}

	if (!mdev)
		return 0;

	if (mthca_is_memfree(mdev)) {
		list_for_each_entry(fmr, fmr_list, list)
			mthca_arbel_fmr_unmap(mdev, to_mfmr(fmr));

		wmb();
	} else
		list_for_each_entry(fmr, fmr_list, list)
			mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr));

	err = mthca_SYNC_TPT(mdev, &status);
	if (err)
		return err;
	if (status)
		return -EINVAL;
	return 0;
}

static ssize_t show_rev(struct class_device *cdev, char *buf)
{
	struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
	return sprintf(buf, "%x\n", dev->rev_id);
}

static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
{
	struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
	return sprintf(buf, "%d.%d.%d\n", (int) (dev->fw_ver >> 32),
		       (int) (dev->fw_ver >> 16) & 0xffff,
		       (int) dev->fw_ver & 0xffff);
}

static ssize_t show_hca(struct class_device *cdev, char *buf)
{
	struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
	switch (dev->pdev->device) {
	case PCI_DEVICE_ID_MELLANOX_TAVOR:
		return sprintf(buf, "MT23108\n");
	case PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT:
		return sprintf(buf, "MT25208 (MT23108 compat mode)\n");
	case PCI_DEVICE_ID_MELLANOX_ARBEL:
		return sprintf(buf, "MT25208\n");
	case PCI_DEVICE_ID_MELLANOX_SINAI:
	case PCI_DEVICE_ID_MELLANOX_SINAI_OLD:
		return sprintf(buf, "MT25204\n");
	default:
		return sprintf(buf, "unknown\n");
	}
}

static ssize_t show_board(struct class_device *cdev, char *buf)
{
	struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
	return sprintf(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id);
}

static CLASS_DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
static CLASS_DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);

static struct class_device_attribute *mthca_class_attributes[] = {
	&class_device_attr_hw_rev,
	&class_device_attr_fw_ver,
	&class_device_attr_hca_type,
	&class_device_attr_board_id
};

static int mthca_init_node_data(struct mthca_dev *dev)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	u8 status;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

	err = mthca_MAD_IFC(dev, 1, 1,
			    1, NULL, NULL, in_mad, out_mad,
			    &status);
	if (err)
		goto out;
	if (status) {
		err = -EINVAL;
		goto out;
	}

	memcpy(dev->ib_dev.node_desc, out_mad->data, 64);

	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mthca_MAD_IFC(dev, 1, 1,
			    1, NULL, NULL, in_mad, out_mad,
			    &status);
	if (err)
		goto out;
	if (status) {
		err = -EINVAL;
		goto out;
	}

	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

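/*
 * Most verbs are common to all HCAs, but the fast-path entry points
 * (post_send/post_recv, CQ arming and the FMR mappers) differ between
 * Tavor-style and memfree (Arbel/Sinai) hardware, so they are chosen
 * at registration time via mthca_is_memfree().
 */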
int mthca_register_device(struct mthca_dev *dev)
{
	int ret;
	int i;

	ret = mthca_init_node_data(dev);
	if (ret)
		return ret;

	strlcpy(dev->ib_dev.name, "mthca%d", IB_DEVICE_NAME_MAX);
	dev->ib_dev.owner            = THIS_MODULE;

	dev->ib_dev.uverbs_abi_ver   = MTHCA_UVERBS_ABI_VERSION;
	dev->ib_dev.uverbs_cmd_mask  =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)          |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
		(1ull << IB_USER_VERBS_CMD_REG_MR)              |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)           |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP)            |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)        |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
	dev->ib_dev.node_type        = RDMA_NODE_IB_CA;
	dev->ib_dev.phys_port_cnt    = dev->limits.num_ports;
	dev->ib_dev.num_comp_vectors = 1;
	dev->ib_dev.dma_device       = &dev->pdev->dev;
	dev->ib_dev.query_device     = mthca_query_device;
	dev->ib_dev.query_port       = mthca_query_port;
	dev->ib_dev.modify_device    = mthca_modify_device;
	dev->ib_dev.modify_port      = mthca_modify_port;
	dev->ib_dev.query_pkey       = mthca_query_pkey;
	dev->ib_dev.query_gid        = mthca_query_gid;
	dev->ib_dev.alloc_ucontext   = mthca_alloc_ucontext;
	dev->ib_dev.dealloc_ucontext = mthca_dealloc_ucontext;
	dev->ib_dev.mmap             = mthca_mmap_uar;
	dev->ib_dev.alloc_pd         = mthca_alloc_pd;
	dev->ib_dev.dealloc_pd       = mthca_dealloc_pd;
	dev->ib_dev.create_ah        = mthca_ah_create;
	dev->ib_dev.query_ah         = mthca_ah_query;
	dev->ib_dev.destroy_ah       = mthca_ah_destroy;

	if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
		dev->ib_dev.create_srq  = mthca_create_srq;
		dev->ib_dev.modify_srq  = mthca_modify_srq;
		dev->ib_dev.query_srq   = mthca_query_srq;
		dev->ib_dev.destroy_srq = mthca_destroy_srq;
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
			(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
			(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)  |
			(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);

		if (mthca_is_memfree(dev))
			dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv;
		else
			dev->ib_dev.post_srq_recv = mthca_tavor_post_srq_recv;
	}

	dev->ib_dev.create_qp   = mthca_create_qp;
	dev->ib_dev.modify_qp   = mthca_modify_qp;
	dev->ib_dev.query_qp    = mthca_query_qp;
	dev->ib_dev.destroy_qp  = mthca_destroy_qp;
	dev->ib_dev.create_cq   = mthca_create_cq;
	dev->ib_dev.resize_cq   = mthca_resize_cq;
	dev->ib_dev.destroy_cq  = mthca_destroy_cq;
	dev->ib_dev.poll_cq     = mthca_poll_cq;
	dev->ib_dev.get_dma_mr  = mthca_get_dma_mr;
	dev->ib_dev.reg_phys_mr = mthca_reg_phys_mr;
	dev->ib_dev.reg_user_mr = mthca_reg_user_mr;
	dev->ib_dev.dereg_mr    = mthca_dereg_mr;

	if (dev->mthca_flags & MTHCA_FLAG_FMR) {
		dev->ib_dev.alloc_fmr   = mthca_alloc_fmr;
		dev->ib_dev.unmap_fmr   = mthca_unmap_fmr;
		dev->ib_dev.dealloc_fmr = mthca_dealloc_fmr;
		if (mthca_is_memfree(dev))
			dev->ib_dev.map_phys_fmr = mthca_arbel_map_phys_fmr;
		else
			dev->ib_dev.map_phys_fmr = mthca_tavor_map_phys_fmr;
	}

	dev->ib_dev.attach_mcast = mthca_multicast_attach;
	dev->ib_dev.detach_mcast = mthca_multicast_detach;
	dev->ib_dev.process_mad  = mthca_process_mad;

	if (mthca_is_memfree(dev)) {
		dev->ib_dev.req_notify_cq = mthca_arbel_arm_cq;
		dev->ib_dev.post_send     = mthca_arbel_post_send;
		dev->ib_dev.post_recv     = mthca_arbel_post_receive;
	} else {
		dev->ib_dev.req_notify_cq = mthca_tavor_arm_cq;
		dev->ib_dev.post_send     = mthca_tavor_post_send;
		dev->ib_dev.post_recv     = mthca_tavor_post_receive;
	}

	mutex_init(&dev->cap_mask_mutex);

	ret = ib_register_device(&dev->ib_dev);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(mthca_class_attributes); ++i) {
		ret = class_device_create_file(&dev->ib_dev.class_dev,
					       mthca_class_attributes[i]);
		if (ret) {
			ib_unregister_device(&dev->ib_dev);
			return ret;
		}
	}

	mthca_start_catas_poll(dev);

	return 0;
}

void mthca_unregister_device(struct mthca_dev *dev)
{
	mthca_stop_catas_poll(dev);
	ib_unregister_device(&dev->ib_dev);
}