#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/export.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include <rdma/mthca-abi.h>
#include "mthca_memfree.h"

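/*
 * Build a LID-routed SubnGet SMP; callers fill in attr_id (and
 * attr_mod where needed) before passing it to mthca_MAD_IFC().
 */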
static void init_query_mad(struct ib_smp *mad)
{
	mad->base_version = 1;
	mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method = IB_MGMT_METHOD_GET;
}

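/*
 * Device attributes come from two sources: a NODE_INFO MAD queried
 * from the firmware (vendor/part/hw IDs, system image GUID) and the
 * limits cached in struct mthca_dev at probe time.
 */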
static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
			      struct ib_udata *uhw)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	struct mthca_dev *mdev = to_mdev(ibdev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(props, 0, sizeof *props);

	props->fw_ver = mdev->fw_ver;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mthca_MAD_IFC(mdev, 1, 1,
			    1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	props->device_cap_flags = mdev->device_cap_flags;
	props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id = be16_to_cpup((__be16 *) (out_mad->data + 30));
	props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

	props->max_mr_size = ~0ull;
	props->page_size_cap = mdev->limits.page_size_cap;
	props->max_qp = mdev->limits.num_qps - mdev->limits.reserved_qps;
	props->max_qp_wr = mdev->limits.max_wqes;
	props->max_send_sge = mdev->limits.max_sg;
	props->max_recv_sge = mdev->limits.max_sg;
	props->max_sge_rd = mdev->limits.max_sg;
	props->max_cq = mdev->limits.num_cqs - mdev->limits.reserved_cqs;
	props->max_cqe = mdev->limits.max_cqes;
	props->max_mr = mdev->limits.num_mpts - mdev->limits.reserved_mrws;
	props->max_pd = mdev->limits.num_pds - mdev->limits.reserved_pds;
	props->max_qp_rd_atom = 1 << mdev->qp_table.rdb_shift;
	props->max_qp_init_rd_atom = mdev->limits.max_qp_init_rdma;
	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
	props->max_srq = mdev->limits.num_srqs - mdev->limits.reserved_srqs;
	props->max_srq_wr = mdev->limits.max_srq_wqes;
	props->max_srq_sge = mdev->limits.max_srq_sge;
	props->local_ca_ack_delay = mdev->limits.local_ca_ack_delay;
	props->atomic_cap = mdev->limits.flags & DEV_LIM_FLAG_ATOMIC ?
		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->max_pkeys = mdev->limits.pkey_table_len;
	props->max_mcast_grp = mdev->limits.num_mgms + mdev->limits.num_amgms;
	props->max_mcast_qp_attach = MTHCA_QP_PER_MGM;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
		props->max_mcast_grp;

	/*
	 * If Sinai memory key optimization is being used, then only
	 * the 8-bit key portion will change.  For other HCAs, the
	 * unused index bits will also be used for FMR remapping.
	 */
	if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
		props->max_map_per_fmr = 255;
	else
		props->max_map_per_fmr =
			(1 << (32 - ilog2(mdev->limits.num_mpts))) - 1;

	err = 0;
 out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

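/*
 * Port attributes are decoded from a PORT_INFO MAD; attr_mod selects
 * which port's PortInfo the SMA returns.
 */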
static int mthca_query_port(struct ib_device *ibdev,
			    u8 port, struct ib_port_attr *props)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	/* props being zeroed by the caller, avoid zeroing it here */

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc = out_mad->data[34] & 0x7;
	props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl = out_mad->data[36] & 0xf;
	props->state = out_mad->data[32] & 0xf;
	props->phys_state = out_mad->data[33] >> 4;
	props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20));
	props->gid_tbl_len = to_mdev(ibdev)->limits.gid_table_len;
	props->max_msg_sz = 0x80000000;
	props->pkey_tbl_len = to_mdev(ibdev)->limits.pkey_table_len;
	props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width = out_mad->data[31] & 0xf;
	props->active_speed = out_mad->data[35] >> 4;
	props->max_mtu = out_mad->data[41] & 0xf;
	props->active_mtu = out_mad->data[36] >> 4;
	props->subnet_timeout = out_mad->data[51] & 0x1f;
	props->max_vl_num = out_mad->data[37] >> 4;
	props->init_type_reply = out_mad->data[41] >> 4;

 out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mthca_modify_device(struct ib_device *ibdev,
			       int mask,
			       struct ib_device_modify *props)
{
	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
			return -ERESTARTSYS;
		memcpy(ibdev->node_desc, props->node_desc,
		       IB_DEVICE_NODE_DESC_MAX);
		mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	}

	return 0;
}

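/*
 * Changing the port capability mask is a read-modify-write of the
 * current PortInfo, so it is serialized with cap_mask_mutex.
 */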
static int mthca_modify_port(struct ib_device *ibdev,
			     u8 port, int port_modify_mask,
			     struct ib_port_modify *props)
{
	struct mthca_set_ib_param set_ib;
	struct ib_port_attr attr;
	int err;

	if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
		return -ERESTARTSYS;

	err = ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	set_ib.set_si_guid = 0;
	set_ib.reset_qkey_viol = !!(port_modify_mask & IB_PORT_RESET_QKEY_CNTR);

	set_ib.cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port);
out:
	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}

static int mthca_query_pkey(struct ib_device *ibdev,
			    u8 port, u16 index, u16 *pkey)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

 out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

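/*
 * A GID is the 64-bit subnet prefix from PORT_INFO followed by a
 * 64-bit GUID from the GUID_INFO table; each GUID_INFO MAD holds
 * eight GUIDs, hence the index / 8 and index % 8 arithmetic below.
 */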
static int mthca_query_gid(struct ib_device *ibdev, u8 port,
			   int index, union ib_gid *gid)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

 out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

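/*
 * A user context owns one UAR (user access region) page for ringing
 * doorbells and, on mem-free HCAs, a table of user doorbell pages.
 * The sizes the userspace library needs are returned through udata.
 */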
static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
						struct ib_udata *udata)
{
	struct mthca_alloc_ucontext_resp uresp;
	struct mthca_ucontext *context;
	int err;

	if (!(to_mdev(ibdev)->active))
		return ERR_PTR(-EAGAIN);

	memset(&uresp, 0, sizeof uresp);

	uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;
	if (mthca_is_memfree(to_mdev(ibdev)))
		uresp.uarc_size = to_mdev(ibdev)->uar_table.uarc_size;
	else
		uresp.uarc_size = 0;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	err = mthca_uar_alloc(to_mdev(ibdev), &context->uar);
	if (err) {
		kfree(context);
		return ERR_PTR(err);
	}

	context->db_tab = mthca_init_user_db_tab(to_mdev(ibdev));
	if (IS_ERR(context->db_tab)) {
		err = PTR_ERR(context->db_tab);
		mthca_uar_free(to_mdev(ibdev), &context->uar);
		kfree(context);
		return ERR_PTR(err);
	}

	if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) {
		mthca_cleanup_user_db_tab(to_mdev(ibdev), &context->uar, context->db_tab);
		mthca_uar_free(to_mdev(ibdev), &context->uar);
		kfree(context);
		return ERR_PTR(-EFAULT);
	}

	context->reg_mr_warned = 0;

	return &context->ibucontext;
}

static int mthca_dealloc_ucontext(struct ib_ucontext *context)
{
	mthca_cleanup_user_db_tab(to_mdev(context->device), &to_mucontext(context)->uar,
				  to_mucontext(context)->db_tab);
	mthca_uar_free(to_mdev(context->device), &to_mucontext(context)->uar);
	kfree(to_mucontext(context));

	return 0;
}

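/*
 * Map the context's UAR page into userspace so doorbells can be rung
 * without a system call; the mapping must be exactly one page and is
 * marked non-cacheable since it is MMIO space.
 */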
static int mthca_mmap_uar(struct ib_ucontext *context,
			  struct vm_area_struct *vma)
{
	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (io_remap_pfn_range(vma, vma->vm_start,
			       to_mucontext(context)->uar.pfn,
			       PAGE_SIZE, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}

static struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev,
				    struct ib_ucontext *context,
				    struct ib_udata *udata)
{
	struct mthca_pd *pd;
	int err;

	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mthca_pd_alloc(to_mdev(ibdev), !context, pd);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context) {
		if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) {
			mthca_pd_free(to_mdev(ibdev), pd);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}
	}

	return &pd->ibpd;
}

static int mthca_dealloc_pd(struct ib_pd *pd)
{
	mthca_pd_free(to_mdev(pd->device), to_mpd(pd));
	kfree(pd);

	return 0;
}

static struct ib_ah *mthca_ah_create(struct ib_pd *pd,
				     struct rdma_ah_attr *ah_attr,
				     struct ib_udata *udata)
{
	int err;
	struct mthca_ah *ah;

	ah = kmalloc(sizeof *ah, GFP_ATOMIC);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	err = mthca_create_ah(to_mdev(pd->device), to_mpd(pd), ah_attr, ah);
	if (err) {
		kfree(ah);
		return ERR_PTR(err);
	}

	return &ah->ibah;
}

static int mthca_ah_destroy(struct ib_ah *ah)
{
	mthca_destroy_ah(to_mdev(ah->device), to_mah(ah));
	kfree(ah);

	return 0;
}

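/*
 * For a userspace SRQ, the library passes down the lkey of the SRQ
 * buffer and the index/page of its doorbell record, which must be
 * mapped before the SRQ is handed to the hardware.
 */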
static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
				       struct ib_srq_init_attr *init_attr,
				       struct ib_udata *udata)
{
	struct mthca_create_srq ucmd;
	struct mthca_ucontext *context = NULL;
	struct mthca_srq *srq;
	int err;

	if (init_attr->srq_type != IB_SRQT_BASIC)
		return ERR_PTR(-EOPNOTSUPP);

	srq = kmalloc(sizeof *srq, GFP_KERNEL);
	if (!srq)
		return ERR_PTR(-ENOMEM);

	if (pd->uobject) {
		context = to_mucontext(pd->uobject->context);

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_free;
		}

		err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
					context->db_tab, ucmd.db_index,
					ucmd.db_page);

		if (err)
			goto err_free;

		srq->mr.ibmr.lkey = ucmd.lkey;
		srq->db_index = ucmd.db_index;
	}

	err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd),
			      &init_attr->attr, srq);

	if (err && pd->uobject)
		mthca_unmap_user_db(to_mdev(pd->device), &context->uar,
				    context->db_tab, ucmd.db_index);

	if (err)
		goto err_free;

	if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) {
		mthca_free_srq(to_mdev(pd->device), srq);
		err = -EFAULT;
		goto err_free;
	}

	return &srq->ibsrq;

err_free:
	kfree(srq);

	return ERR_PTR(err);
}

static int mthca_destroy_srq(struct ib_srq *srq)
{
	struct mthca_ucontext *context;

	if (srq->uobject) {
		context = to_mucontext(srq->uobject->context);

		mthca_unmap_user_db(to_mdev(srq->device), &context->uar,
				    context->db_tab, to_msrq(srq)->db_index);
	}

	mthca_free_srq(to_mdev(srq->device), to_msrq(srq));
	kfree(srq);

	return 0;
}

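/*
 * For regular QPs created from userspace, both the send and receive
 * doorbell records are mapped before mthca_alloc_qp(); special QPs
 * (SMI/GSI) may only be created in the kernel.
 */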
static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata)
{
	struct mthca_create_qp ucmd;
	struct mthca_qp *qp;
	int err;

	if (init_attr->create_flags)
		return ERR_PTR(-EINVAL);

	switch (init_attr->qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	{
		struct mthca_ucontext *context;

		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		if (pd->uobject) {
			context = to_mucontext(pd->uobject->context);

			if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
				kfree(qp);
				return ERR_PTR(-EFAULT);
			}

			err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
						context->db_tab,
						ucmd.sq_db_index, ucmd.sq_db_page);
			if (err) {
				kfree(qp);
				return ERR_PTR(err);
			}

			err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
						context->db_tab,
						ucmd.rq_db_index, ucmd.rq_db_page);
			if (err) {
				mthca_unmap_user_db(to_mdev(pd->device),
						    &context->uar,
						    context->db_tab,
						    ucmd.sq_db_index);
				kfree(qp);
				return ERR_PTR(err);
			}

			qp->mr.ibmr.lkey = ucmd.lkey;
			qp->sq.db_index = ucmd.sq_db_index;
			qp->rq.db_index = ucmd.rq_db_index;
		}

		err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd),
				     to_mcq(init_attr->send_cq),
				     to_mcq(init_attr->recv_cq),
				     init_attr->qp_type, init_attr->sq_sig_type,
				     &init_attr->cap, qp);

		if (err && pd->uobject) {
			context = to_mucontext(pd->uobject->context);

			mthca_unmap_user_db(to_mdev(pd->device),
					    &context->uar,
					    context->db_tab,
					    ucmd.sq_db_index);
			mthca_unmap_user_db(to_mdev(pd->device),
					    &context->uar,
					    context->db_tab,
					    ucmd.rq_db_index);
		}

		qp->ibqp.qp_num = qp->qpn;
		break;
	}
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	{
		/* Don't allow userspace to create special QPs */
		if (pd->uobject)
			return ERR_PTR(-EINVAL);

		qp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;

		err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd),
				      to_mcq(init_attr->send_cq),
				      to_mcq(init_attr->recv_cq),
				      init_attr->sq_sig_type, &init_attr->cap,
				      qp->ibqp.qp_num, init_attr->port_num,
				      to_msqp(qp));
		break;
	}
	default:
		/* Don't support raw QPs */
		return ERR_PTR(-ENOSYS);
	}

	if (err) {
		kfree(qp);
		return ERR_PTR(err);
	}

	init_attr->cap.max_send_wr = qp->sq.max;
	init_attr->cap.max_recv_wr = qp->rq.max;
	init_attr->cap.max_send_sge = qp->sq.max_gs;
	init_attr->cap.max_recv_sge = qp->rq.max_gs;
	init_attr->cap.max_inline_data = qp->max_inline_data;

	return &qp->ibqp;
}

static int mthca_destroy_qp(struct ib_qp *qp)
{
	if (qp->uobject) {
		mthca_unmap_user_db(to_mdev(qp->device),
				    &to_mucontext(qp->uobject->context)->uar,
				    to_mucontext(qp->uobject->context)->db_tab,
				    to_mqp(qp)->sq.db_index);
		mthca_unmap_user_db(to_mdev(qp->device),
				    &to_mucontext(qp->uobject->context)->uar,
				    to_mucontext(qp->uobject->context)->db_tab,
				    to_mqp(qp)->rq.db_index);
	}
	mthca_free_qp(to_mdev(qp->device), to_mqp(qp));
	kfree(qp);
	return 0;
}

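/*
 * A userspace CQ needs two doorbell records mapped: a consumer-index
 * doorbell written on every poll and an arm doorbell written when
 * requesting completion notification.
 */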
static struct ib_cq *mthca_create_cq(struct ib_device *ibdev,
				     const struct ib_cq_init_attr *attr,
				     struct ib_ucontext *context,
				     struct ib_udata *udata)
{
	int entries = attr->cqe;
	struct mthca_create_cq ucmd;
	struct mthca_cq *cq;
	int nent;
	int err;

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes)
		return ERR_PTR(-EINVAL);

	if (context) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
			return ERR_PTR(-EFAULT);

		err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
					to_mucontext(context)->db_tab,
					ucmd.set_db_index, ucmd.set_db_page);
		if (err)
			return ERR_PTR(err);

		err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
					to_mucontext(context)->db_tab,
					ucmd.arm_db_index, ucmd.arm_db_page);
		if (err)
			goto err_unmap_set;
	}

	cq = kmalloc(sizeof *cq, GFP_KERNEL);
	if (!cq) {
		err = -ENOMEM;
		goto err_unmap_arm;
	}

	if (context) {
		cq->buf.mr.ibmr.lkey = ucmd.lkey;
		cq->set_ci_db_index = ucmd.set_db_index;
		cq->arm_db_index = ucmd.arm_db_index;
	}

	/* Round the number of entries up to the next power of 2. */
	for (nent = 1; nent <= entries; nent <<= 1)
		; /* nothing */

	err = mthca_init_cq(to_mdev(ibdev), nent,
			    context ? to_mucontext(context) : NULL,
			    context ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num,
			    cq);
	if (err)
		goto err_free;

	if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) {
		mthca_free_cq(to_mdev(ibdev), cq);
		err = -EFAULT;
		goto err_free;
	}

	cq->resize_buf = NULL;

	return &cq->ibcq;

err_free:
	kfree(cq);

err_unmap_arm:
	if (context)
		mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
				    to_mucontext(context)->db_tab, ucmd.arm_db_index);

err_unmap_set:
	if (context)
		mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
				    to_mucontext(context)->db_tab, ucmd.set_db_index);

	return ERR_PTR(err);
}

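/*
 * Allocate the staging buffer for a kernel CQ resize.  The buffer is
 * created in CQ_RESIZE_ALLOC state under cq->lock so a racing resize
 * sees -EBUSY, and moved to CQ_RESIZE_READY once the new CQE array
 * has been allocated outside the lock.
 */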
static int mthca_alloc_resize_buf(struct mthca_dev *dev, struct mthca_cq *cq,
				  int entries)
{
	int ret;

	spin_lock_irq(&cq->lock);
	if (cq->resize_buf) {
		ret = -EBUSY;
		goto unlock;
	}

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
	if (!cq->resize_buf) {
		ret = -ENOMEM;
		goto unlock;
	}

	cq->resize_buf->state = CQ_RESIZE_ALLOC;

	ret = 0;

unlock:
	spin_unlock_irq(&cq->lock);

	if (ret)
		return ret;

	ret = mthca_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
	if (ret) {
		spin_lock_irq(&cq->lock);
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		spin_unlock_irq(&cq->lock);
		return ret;
	}

	cq->resize_buf->cqe = entries - 1;

	spin_lock_irq(&cq->lock);
	cq->resize_buf->state = CQ_RESIZE_READY;
	spin_unlock_irq(&cq->lock);

	return 0;
}

static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(ibcq->device);
	struct mthca_cq *cq = to_mcq(ibcq);
	struct mthca_resize_cq ucmd;
	u32 lkey;
	int ret;

	if (entries < 1 || entries > dev->limits.max_cqes)
		return -EINVAL;

	mutex_lock(&cq->mutex);

	entries = roundup_pow_of_two(entries + 1);
	if (entries == ibcq->cqe + 1) {
		ret = 0;
		goto out;
	}

	if (cq->is_kernel) {
		ret = mthca_alloc_resize_buf(dev, cq, entries);
		if (ret)
			goto out;
		lkey = cq->resize_buf->buf.mr.ibmr.lkey;
	} else {
		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			ret = -EFAULT;
			goto out;
		}
		lkey = ucmd.lkey;
	}

	ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, ilog2(entries));

	if (ret) {
		if (cq->resize_buf) {
			mthca_free_cq_buf(dev, &cq->resize_buf->buf,
					  cq->resize_buf->cqe);
			kfree(cq->resize_buf);
			spin_lock_irq(&cq->lock);
			cq->resize_buf = NULL;
			spin_unlock_irq(&cq->lock);
		}
		goto out;
	}

	if (cq->is_kernel) {
		struct mthca_cq_buf tbuf;
		int tcqe;

		spin_lock_irq(&cq->lock);
		if (cq->resize_buf->state == CQ_RESIZE_READY) {
			mthca_cq_resize_copy_cqes(cq);
			tbuf = cq->buf;
			tcqe = cq->ibcq.cqe;
			cq->buf = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;
		} else {
			tbuf = cq->resize_buf->buf;
			tcqe = cq->resize_buf->cqe;
		}

		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		spin_unlock_irq(&cq->lock);

		mthca_free_cq_buf(dev, &tbuf, tcqe);
	} else
		ibcq->cqe = entries - 1;

out:
	mutex_unlock(&cq->mutex);

	return ret;
}

static int mthca_destroy_cq(struct ib_cq *cq)
{
	if (cq->uobject) {
		mthca_unmap_user_db(to_mdev(cq->device),
				    &to_mucontext(cq->uobject->context)->uar,
				    to_mucontext(cq->uobject->context)->db_tab,
				    to_mcq(cq)->arm_db_index);
		mthca_unmap_user_db(to_mdev(cq->device),
				    &to_mucontext(cq->uobject->context)->uar,
				    to_mucontext(cq->uobject->context)->db_tab,
				    to_mcq(cq)->set_ci_db_index);
	}
	mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
	kfree(cq);

	return 0;
}

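/*
 * Translate IB verbs access flags into Mellanox MPT flags; local
 * read access is always enabled.
 */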
static inline u32 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MTHCA_MPT_FLAG_ATOMIC : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE ? MTHCA_MPT_FLAG_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ ? MTHCA_MPT_FLAG_REMOTE_READ : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE ? MTHCA_MPT_FLAG_LOCAL_WRITE : 0) |
	       MTHCA_MPT_FLAG_LOCAL_READ;
}

static struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mthca_mr *mr;
	int err;

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = mthca_mr_alloc_notrans(to_mdev(pd->device),
				     to_mpd(pd)->pd_num,
				     convert_access(acc), mr);

	if (err) {
		kfree(mr);
		return ERR_PTR(err);
	}

	mr->umem = NULL;

	return &mr->ibmr;
}

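/*
 * Register a userspace memory region: pin the pages with
 * ib_umem_get(), then feed their DMA addresses to the hardware MTT
 * in write_mtt_size chunks staged through a single scratch page.
 */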
static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				       u64 virt, int acc, struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(pd->device);
	struct scatterlist *sg;
	struct mthca_mr *mr;
	struct mthca_reg_mr ucmd;
	u64 *pages;
	int shift, n, len;
	int i, k, entry;
	int err = 0;
	int write_mtt_size;

	if (udata->inlen < sizeof ucmd) {
		if (!to_mucontext(pd->uobject->context)->reg_mr_warned) {
			mthca_warn(dev, "Process '%s' did not pass in MR attrs.\n",
				   current->comm);
			mthca_warn(dev, "  Update libmthca to fix this.\n");
		}
		++to_mucontext(pd->uobject->context)->reg_mr_warned;
		ucmd.mr_attrs = 0;
	} else if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
		return ERR_PTR(-EFAULT);

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = ib_umem_get(pd->uobject->context, start, length, acc,
			       ucmd.mr_attrs & MTHCA_MR_DMASYNC);

	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		goto err;
	}

	shift = mr->umem->page_shift;
	n = mr->umem->nmap;

	mr->mtt = mthca_alloc_mtt(dev, n);
	if (IS_ERR(mr->mtt)) {
		err = PTR_ERR(mr->mtt);
		goto err_umem;
	}

	pages = (u64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_mtt;
	}

	i = n = 0;

	write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages));

	for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
		len = sg_dma_len(sg) >> shift;
		for (k = 0; k < len; ++k) {
			pages[i++] = sg_dma_address(sg) + (k << shift);

			/*
			 * Be friendly to write_mtt and pass it chunks
			 * of appropriate size.
			 */
			if (i == write_mtt_size) {
				err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
				if (err)
					goto mtt_done;
				n += i;
				i = 0;
			}
		}
	}

	if (i)
		err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
mtt_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_mtt;

	err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, shift, virt, length,
			     convert_access(acc), mr);

	if (err)
		goto err_mtt;

	return &mr->ibmr;

err_mtt:
	mthca_free_mtt(dev, mr->mtt);

err_umem:
	ib_umem_release(mr->umem);

err:
	kfree(mr);
	return ERR_PTR(err);
}

static int mthca_dereg_mr(struct ib_mr *mr)
{
	struct mthca_mr *mmr = to_mmr(mr);

	mthca_free_mr(to_mdev(mr->device), mmr);
	if (mmr->umem)
		ib_umem_release(mmr->umem);
	kfree(mmr);

	return 0;
}

static struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
				      struct ib_fmr_attr *fmr_attr)
{
	struct mthca_fmr *fmr;
	int err;

	fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
	if (!fmr)
		return ERR_PTR(-ENOMEM);

	memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr);
	err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num,
			      convert_access(mr_access_flags), fmr);

	if (err) {
		kfree(fmr);
		return ERR_PTR(err);
	}

	return &fmr->ibmr;
}

static int mthca_dealloc_fmr(struct ib_fmr *fmr)
{
	struct mthca_fmr *mfmr = to_mfmr(fmr);
	int err;

	err = mthca_free_fmr(to_mdev(fmr->device), mfmr);
	if (err)
		return err;

	kfree(mfmr);
	return 0;
}

static int mthca_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;
	int err;
	struct mthca_dev *mdev = NULL;

	/* All FMRs in the list must belong to the same device. */
	list_for_each_entry(fmr, fmr_list, list) {
		if (mdev && to_mdev(fmr->device) != mdev)
			return -EINVAL;
		mdev = to_mdev(fmr->device);
	}

	if (!mdev)
		return 0;

	if (mthca_is_memfree(mdev)) {
		list_for_each_entry(fmr, fmr_list, list)
			mthca_arbel_fmr_unmap(mdev, to_mfmr(fmr));

		wmb();
	} else
		list_for_each_entry(fmr, fmr_list, list)
			mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr));

	err = mthca_SYNC_TPT(mdev);
	return err;
}

static ssize_t hw_rev_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct mthca_dev *dev =
		container_of(device, struct mthca_dev, ib_dev.dev);
	return sprintf(buf, "%x\n", dev->rev_id);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct mthca_dev *dev =
		container_of(device, struct mthca_dev, ib_dev.dev);
	switch (dev->pdev->device) {
	case PCI_DEVICE_ID_MELLANOX_TAVOR:
		return sprintf(buf, "MT23108\n");
	case PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT:
		return sprintf(buf, "MT25208 (MT23108 compat mode)\n");
	case PCI_DEVICE_ID_MELLANOX_ARBEL:
		return sprintf(buf, "MT25208\n");
	case PCI_DEVICE_ID_MELLANOX_SINAI:
	case PCI_DEVICE_ID_MELLANOX_SINAI_OLD:
		return sprintf(buf, "MT25204\n");
	default:
		return sprintf(buf, "unknown\n");
	}
}
static DEVICE_ATTR_RO(hca_type);

static ssize_t board_id_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct mthca_dev *dev =
		container_of(device, struct mthca_dev, ib_dev.dev);
	return sprintf(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id);
}
static DEVICE_ATTR_RO(board_id);

static struct attribute *mthca_dev_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	&dev_attr_board_id.attr,
	NULL
};

static const struct attribute_group mthca_attr_group = {
	.attrs = mthca_dev_attributes,
};

static int mthca_init_node_data(struct mthca_dev *dev)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

	err = mthca_MAD_IFC(dev, 1, 1,
			    1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);

	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mthca_MAD_IFC(dev, 1, 1,
			    1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	if (mthca_is_memfree(dev))
		dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mthca_port_immutable(struct ib_device *ibdev, u8 port_num,
				struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static void get_dev_fw_str(struct ib_device *device, char *str)
{
	struct mthca_dev *dev =
		container_of(device, struct mthca_dev, ib_dev);
	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d",
		 (int) (dev->fw_ver >> 32),
		 (int) (dev->fw_ver >> 16) & 0xffff,
		 (int) dev->fw_ver & 0xffff);
}

int mthca_register_device(struct mthca_dev *dev)
{
	int ret;

	ret = mthca_init_node_data(dev);
	if (ret)
		return ret;

	dev->ib_dev.owner = THIS_MODULE;

	dev->ib_dev.uverbs_abi_ver = MTHCA_UVERBS_ABI_VERSION;
	dev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
	dev->ib_dev.phys_port_cnt = dev->limits.num_ports;
	dev->ib_dev.num_comp_vectors = 1;
	dev->ib_dev.dev.parent = &dev->pdev->dev;
	dev->ib_dev.query_device = mthca_query_device;
	dev->ib_dev.query_port = mthca_query_port;
	dev->ib_dev.modify_device = mthca_modify_device;
	dev->ib_dev.modify_port = mthca_modify_port;
	dev->ib_dev.query_pkey = mthca_query_pkey;
	dev->ib_dev.query_gid = mthca_query_gid;
	dev->ib_dev.alloc_ucontext = mthca_alloc_ucontext;
	dev->ib_dev.dealloc_ucontext = mthca_dealloc_ucontext;
	dev->ib_dev.mmap = mthca_mmap_uar;
	dev->ib_dev.alloc_pd = mthca_alloc_pd;
	dev->ib_dev.dealloc_pd = mthca_dealloc_pd;
	dev->ib_dev.create_ah = mthca_ah_create;
	dev->ib_dev.query_ah = mthca_ah_query;
	dev->ib_dev.destroy_ah = mthca_ah_destroy;

	if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
		dev->ib_dev.create_srq = mthca_create_srq;
		dev->ib_dev.modify_srq = mthca_modify_srq;
		dev->ib_dev.query_srq = mthca_query_srq;
		dev->ib_dev.destroy_srq = mthca_destroy_srq;
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
			(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
			(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
			(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);

		if (mthca_is_memfree(dev))
			dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv;
		else
			dev->ib_dev.post_srq_recv = mthca_tavor_post_srq_recv;
	}

	dev->ib_dev.create_qp = mthca_create_qp;
	dev->ib_dev.modify_qp = mthca_modify_qp;
	dev->ib_dev.query_qp = mthca_query_qp;
	dev->ib_dev.destroy_qp = mthca_destroy_qp;
	dev->ib_dev.create_cq = mthca_create_cq;
	dev->ib_dev.resize_cq = mthca_resize_cq;
	dev->ib_dev.destroy_cq = mthca_destroy_cq;
	dev->ib_dev.poll_cq = mthca_poll_cq;
	dev->ib_dev.get_dma_mr = mthca_get_dma_mr;
	dev->ib_dev.reg_user_mr = mthca_reg_user_mr;
	dev->ib_dev.dereg_mr = mthca_dereg_mr;
	dev->ib_dev.get_port_immutable = mthca_port_immutable;
	dev->ib_dev.get_dev_fw_str = get_dev_fw_str;

	if (dev->mthca_flags & MTHCA_FLAG_FMR) {
		dev->ib_dev.alloc_fmr = mthca_alloc_fmr;
		dev->ib_dev.unmap_fmr = mthca_unmap_fmr;
		dev->ib_dev.dealloc_fmr = mthca_dealloc_fmr;
		if (mthca_is_memfree(dev))
			dev->ib_dev.map_phys_fmr = mthca_arbel_map_phys_fmr;
		else
			dev->ib_dev.map_phys_fmr = mthca_tavor_map_phys_fmr;
	}

	dev->ib_dev.attach_mcast = mthca_multicast_attach;
	dev->ib_dev.detach_mcast = mthca_multicast_detach;
	dev->ib_dev.process_mad = mthca_process_mad;

	if (mthca_is_memfree(dev)) {
		dev->ib_dev.req_notify_cq = mthca_arbel_arm_cq;
		dev->ib_dev.post_send = mthca_arbel_post_send;
		dev->ib_dev.post_recv = mthca_arbel_post_receive;
	} else {
		dev->ib_dev.req_notify_cq = mthca_tavor_arm_cq;
		dev->ib_dev.post_send = mthca_tavor_post_send;
		dev->ib_dev.post_recv = mthca_tavor_post_receive;
	}

	mutex_init(&dev->cap_mask_mutex);

	rdma_set_device_sysfs_group(&dev->ib_dev, &mthca_attr_group);
	dev->ib_dev.driver_id = RDMA_DRIVER_MTHCA;
	ret = ib_register_device(&dev->ib_dev, "mthca%d", NULL);
	if (ret)
		return ret;

	mthca_start_catas_poll(dev);

	return 0;
}

void mthca_unregister_device(struct mthca_dev *dev)
{
	mthca_stop_catas_poll(dev);
	ib_unregister_device(&dev->ib_dev);
}