1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36#include <linux/file.h>
37#include <linux/fs.h>
38#include <linux/slab.h>
39
40#include <asm/uaccess.h>
41
42#include "uverbs.h"
43
/*
 * Per-object-type lockdep class: gives each kind of uobject rwsem its
 * own lock class key and a human-readable name in lockdep reports.
 */
struct uverbs_lock_class {
	struct lock_class_key	key;
	char			name[16];	/* e.g. "PD-uobj" */
};
48
/* One lockdep class per uobject type so nested locking is annotated per type. */
static struct uverbs_lock_class pd_lock_class	= { .name = "PD-uobj" };
static struct uverbs_lock_class mr_lock_class	= { .name = "MR-uobj" };
static struct uverbs_lock_class mw_lock_class	= { .name = "MW-uobj" };
static struct uverbs_lock_class cq_lock_class	= { .name = "CQ-uobj" };
static struct uverbs_lock_class qp_lock_class	= { .name = "QP-uobj" };
static struct uverbs_lock_class ah_lock_class	= { .name = "AH-uobj" };
static struct uverbs_lock_class srq_lock_class	= { .name = "SRQ-uobj" };
static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
#endif
60
/*
 * Fill an ib_udata with the userspace input/output buffer pointers and
 * lengths for the driver-private portion of a uverbs command.  ibuf/obuf
 * are raw user addresses (obuf typically cmd.response + sizeof resp).
 */
#define INIT_UDATA(udata, ibuf, obuf, ilen, olen)			\
	do {								\
		(udata)->inbuf  = (void __user *) (ibuf);		\
		(udata)->outbuf = (void __user *) (obuf);		\
		(udata)->inlen  = (ilen);				\
		(udata)->outlen = (olen);				\
	} while (0)
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
95 struct ib_ucontext *context, struct uverbs_lock_class *c)
96{
97 uobj->user_handle = user_handle;
98 uobj->context = context;
99 kref_init(&uobj->ref);
100 init_rwsem(&uobj->mutex);
101 lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name);
102 uobj->live = 0;
103}
104
105static void release_uobj(struct kref *kref)
106{
107 kfree(container_of(kref, struct ib_uobject, ref));
108}
109
/* Drop a uobject reference; frees it via release_uobj() on last put. */
static void put_uobj(struct ib_uobject *uobj)
{
	kref_put(&uobj->ref, release_uobj);
}
114
/* Release a uobject obtained via idr_read_uobj(): unlock read side, drop ref. */
static void put_uobj_read(struct ib_uobject *uobj)
{
	up_read(&uobj->mutex);
	put_uobj(uobj);
}
120
/* Release a uobject obtained via idr_write_uobj(): unlock write side, drop ref. */
static void put_uobj_write(struct ib_uobject *uobj)
{
	up_write(&uobj->mutex);
	put_uobj(uobj);
}
126
127static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
128{
129 int ret;
130
131 idr_preload(GFP_KERNEL);
132 spin_lock(&ib_uverbs_idr_lock);
133
134 ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
135 if (ret >= 0)
136 uobj->id = ret;
137
138 spin_unlock(&ib_uverbs_idr_lock);
139 idr_preload_end();
140
141 return ret < 0 ? ret : 0;
142}
143
/* Remove @uobj's id from @idr; does not drop the uobject reference. */
void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	spin_lock(&ib_uverbs_idr_lock);
	idr_remove(idr, uobj->id);
	spin_unlock(&ib_uverbs_idr_lock);
}
150
151static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
152 struct ib_ucontext *context)
153{
154 struct ib_uobject *uobj;
155
156 spin_lock(&ib_uverbs_idr_lock);
157 uobj = idr_find(idr, id);
158 if (uobj) {
159 if (uobj->context == context)
160 kref_get(&uobj->ref);
161 else
162 uobj = NULL;
163 }
164 spin_unlock(&ib_uverbs_idr_lock);
165
166 return uobj;
167}
168
/*
 * Look up a uobject by id and return it locked for reading.  @nested is
 * a lockdep annotation for the one caller that already holds another
 * uobject's rwsem.  Returns NULL if the id is stale, belongs to a
 * different context, or the object was already marked dead (!live).
 */
static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
					struct ib_ucontext *context, int nested)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	if (nested)
		down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
	else
		down_read(&uobj->mutex);
	/* live is re-checked under the lock: destroy may have raced us. */
	if (!uobj->live) {
		put_uobj_read(uobj);
		return NULL;
	}

	return uobj;
}
189
/*
 * Like idr_read_uobj() but returns the uobject locked for writing —
 * used by destroy/modify paths that need exclusive access.  NULL on
 * miss, context mismatch, or concurrent destruction (!live).
 */
static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	down_write(&uobj->mutex);
	/* Re-check liveness under the write lock. */
	if (!uobj->live) {
		put_uobj_write(uobj);
		return NULL;
	}

	return uobj;
}
207
208static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
209 int nested)
210{
211 struct ib_uobject *uobj;
212
213 uobj = idr_read_uobj(idr, id, context, nested);
214 return uobj ? uobj->object : NULL;
215}
216
/* Look up a PD by handle, returned read-locked; release with put_pd_read(). */
static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
}
221
/* Counterpart of idr_read_pd(). */
static void put_pd_read(struct ib_pd *pd)
{
	put_uobj_read(pd->uobject);
}
226
/* Look up a CQ by handle, read-locked; @nested for callers holding another CQ. */
static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
{
	return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
}
231
/* Counterpart of idr_read_cq(). */
static void put_cq_read(struct ib_cq *cq)
{
	put_uobj_read(cq->uobject);
}
236
/* Look up an AH by handle, returned read-locked. */
static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
}
241
/* Counterpart of idr_read_ah(). */
static void put_ah_read(struct ib_ah *ah)
{
	put_uobj_read(ah->uobject);
}
246
/* Look up a QP by handle, returned read-locked. */
static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}
251
252static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
253{
254 struct ib_uobject *uobj;
255
256 uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
257 return uobj ? uobj->object : NULL;
258}
259
/* Counterpart of idr_read_qp(). */
static void put_qp_read(struct ib_qp *qp)
{
	put_uobj_read(qp->uobject);
}
264
/* Counterpart of idr_write_qp(). */
static void put_qp_write(struct ib_qp *qp)
{
	put_uobj_write(qp->uobject);
}
269
/* Look up an SRQ by handle, returned read-locked. */
static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
}
274
/* Counterpart of idr_read_srq(). */
static void put_srq_read(struct ib_srq *srq)
{
	put_uobj_read(srq->uobject);
}
279
/*
 * Look up an XRCD by handle, read-locked.  Also returns the uobject via
 * @uobj because callers release via put_xrcd_read(uobj), not the xrcd.
 */
static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
				     struct ib_uobject **uobj)
{
	*uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
	return *uobj ? (*uobj)->object : NULL;
}
286
/* Counterpart of idr_read_xrcd(); takes the uobject it returned. */
static void put_xrcd_read(struct ib_uobject *uobj)
{
	put_uobj_read(uobj);
}
291
292ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
293 const char __user *buf,
294 int in_len, int out_len)
295{
296 struct ib_uverbs_get_context cmd;
297 struct ib_uverbs_get_context_resp resp;
298 struct ib_udata udata;
299 struct ib_device *ibdev = file->device->ib_dev;
300 struct ib_ucontext *ucontext;
301 struct file *filp;
302 int ret;
303
304 if (out_len < sizeof resp)
305 return -ENOSPC;
306
307 if (copy_from_user(&cmd, buf, sizeof cmd))
308 return -EFAULT;
309
310 mutex_lock(&file->mutex);
311
312 if (file->ucontext) {
313 ret = -EINVAL;
314 goto err;
315 }
316
317 INIT_UDATA(&udata, buf + sizeof cmd,
318 (unsigned long) cmd.response + sizeof resp,
319 in_len - sizeof cmd, out_len - sizeof resp);
320
321 ucontext = ibdev->alloc_ucontext(ibdev, &udata);
322 if (IS_ERR(ucontext)) {
323 ret = PTR_ERR(ucontext);
324 goto err;
325 }
326
327 ucontext->device = ibdev;
328 INIT_LIST_HEAD(&ucontext->pd_list);
329 INIT_LIST_HEAD(&ucontext->mr_list);
330 INIT_LIST_HEAD(&ucontext->mw_list);
331 INIT_LIST_HEAD(&ucontext->cq_list);
332 INIT_LIST_HEAD(&ucontext->qp_list);
333 INIT_LIST_HEAD(&ucontext->srq_list);
334 INIT_LIST_HEAD(&ucontext->ah_list);
335 INIT_LIST_HEAD(&ucontext->xrcd_list);
336 INIT_LIST_HEAD(&ucontext->rule_list);
337 ucontext->closing = 0;
338
339 resp.num_comp_vectors = file->device->num_comp_vectors;
340
341 ret = get_unused_fd_flags(O_CLOEXEC);
342 if (ret < 0)
343 goto err_free;
344 resp.async_fd = ret;
345
346 filp = ib_uverbs_alloc_event_file(file, 1);
347 if (IS_ERR(filp)) {
348 ret = PTR_ERR(filp);
349 goto err_fd;
350 }
351
352 if (copy_to_user((void __user *) (unsigned long) cmd.response,
353 &resp, sizeof resp)) {
354 ret = -EFAULT;
355 goto err_file;
356 }
357
358 file->async_file = filp->private_data;
359
360 INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev,
361 ib_uverbs_event_handler);
362 ret = ib_register_event_handler(&file->event_handler);
363 if (ret)
364 goto err_file;
365
366 kref_get(&file->async_file->ref);
367 kref_get(&file->ref);
368 file->ucontext = ucontext;
369
370 fd_install(resp.async_fd, filp);
371
372 mutex_unlock(&file->mutex);
373
374 return in_len;
375
376err_file:
377 fput(filp);
378
379err_fd:
380 put_unused_fd(resp.async_fd);
381
382err_free:
383 ibdev->dealloc_ucontext(ucontext);
384
385err:
386 mutex_unlock(&file->mutex);
387 return ret;
388}
389
/*
 * QUERY_DEVICE: fetch device attributes via ib_query_device() and copy
 * them field-by-field into the fixed-layout user response.  The memset
 * also clears padding so no kernel stack data leaks to userspace.
 */
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device cmd;
	struct ib_uverbs_query_device_resp resp;
	struct ib_device_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_device(file->device->ib_dev, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.fw_ver = attr.fw_ver;
	resp.node_guid = file->device->ib_dev->node_guid;
	resp.sys_image_guid = attr.sys_image_guid;
	resp.max_mr_size = attr.max_mr_size;
	resp.page_size_cap = attr.page_size_cap;
	resp.vendor_id = attr.vendor_id;
	resp.vendor_part_id = attr.vendor_part_id;
	resp.hw_ver = attr.hw_ver;
	resp.max_qp = attr.max_qp;
	resp.max_qp_wr = attr.max_qp_wr;
	resp.device_cap_flags = attr.device_cap_flags;
	resp.max_sge = attr.max_sge;
	resp.max_sge_rd = attr.max_sge_rd;
	resp.max_cq = attr.max_cq;
	resp.max_cqe = attr.max_cqe;
	resp.max_mr = attr.max_mr;
	resp.max_pd = attr.max_pd;
	resp.max_qp_rd_atom = attr.max_qp_rd_atom;
	resp.max_ee_rd_atom = attr.max_ee_rd_atom;
	resp.max_res_rd_atom = attr.max_res_rd_atom;
	resp.max_qp_init_rd_atom = attr.max_qp_init_rd_atom;
	resp.max_ee_init_rd_atom = attr.max_ee_init_rd_atom;
	resp.atomic_cap = attr.atomic_cap;
	resp.max_ee = attr.max_ee;
	resp.max_rdd = attr.max_rdd;
	resp.max_mw = attr.max_mw;
	resp.max_raw_ipv6_qp = attr.max_raw_ipv6_qp;
	resp.max_raw_ethy_qp = attr.max_raw_ethy_qp;
	resp.max_mcast_grp = attr.max_mcast_grp;
	resp.max_mcast_qp_attach = attr.max_mcast_qp_attach;
	resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
	resp.max_ah = attr.max_ah;
	resp.max_fmr = attr.max_fmr;
	resp.max_map_per_fmr = attr.max_map_per_fmr;
	resp.max_srq = attr.max_srq;
	resp.max_srq_wr = attr.max_srq_wr;
	resp.max_srq_sge = attr.max_srq_sge;
	resp.max_pkeys = attr.max_pkeys;
	resp.local_ca_ack_delay = attr.local_ca_ack_delay;
	resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
458
/*
 * QUERY_PORT: fetch attributes for cmd.port_num via ib_query_port() and
 * copy them into the user response.  memset clears padding/reserved
 * bytes before the copy_to_user.
 */
ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state = attr.state;
	resp.max_mtu = attr.max_mtu;
	resp.active_mtu = attr.active_mtu;
	resp.gid_tbl_len = attr.gid_tbl_len;
	resp.port_cap_flags = attr.port_cap_flags;
	resp.max_msg_sz = attr.max_msg_sz;
	resp.bad_pkey_cntr = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr = attr.qkey_viol_cntr;
	resp.pkey_tbl_len = attr.pkey_tbl_len;
	resp.lid = attr.lid;
	resp.sm_lid = attr.sm_lid;
	resp.lmc = attr.lmc;
	resp.max_vl_num = attr.max_vl_num;
	resp.sm_sl = attr.sm_sl;
	resp.subnet_timeout = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width = attr.active_width;
	resp.active_speed = attr.active_speed;
	resp.phys_state = attr.phys_state;
	resp.link_layer = rdma_port_get_link_layer(file->device->ib_dev,
						   cmd.port_num);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
508
/*
 * ALLOC_PD: allocate a protection domain via the driver and publish it
 * to userspace.  Standard creation pattern: init uobject, hold its
 * write lock while building, add to idr, copy handle to user, link into
 * the per-context list, then mark live and unlock.  Error paths unwind
 * in reverse order via gotos.
 */
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
	/* Held until the PD is fully constructed and marked live. */
	down_write(&uobj->mutex);

	pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
					    file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device = file->device->ib_dev;
	pd->uobject = uobj;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->pd_list);
	mutex_unlock(&file->mutex);

	/* Only now may idr lookups return this object. */
	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

err_idr:
	ib_dealloc_pd(pd);

err:
	put_uobj_write(uobj);
	return ret;
}
582
/*
 * DEALLOC_PD: standard destroy pattern — take the uobject write-locked,
 * destroy the verbs object, clear live on success so concurrent lookups
 * fail, then remove from idr and the per-context list and drop the last
 * reference.
 */
ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject *uobj;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	ret = ib_dealloc_pd(uobj->object);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}
617
/*
 * Node in the per-device rbtree mapping an inode (the file an XRCD was
 * opened against) to its shared ib_xrcd, keyed by inode pointer.
 */
struct xrcd_table_entry {
	struct rb_node	node;
	struct ib_xrcd *xrcd;
	struct inode   *inode;
};
623
624static int xrcd_table_insert(struct ib_uverbs_device *dev,
625 struct inode *inode,
626 struct ib_xrcd *xrcd)
627{
628 struct xrcd_table_entry *entry, *scan;
629 struct rb_node **p = &dev->xrcd_tree.rb_node;
630 struct rb_node *parent = NULL;
631
632 entry = kmalloc(sizeof *entry, GFP_KERNEL);
633 if (!entry)
634 return -ENOMEM;
635
636 entry->xrcd = xrcd;
637 entry->inode = inode;
638
639 while (*p) {
640 parent = *p;
641 scan = rb_entry(parent, struct xrcd_table_entry, node);
642
643 if (inode < scan->inode) {
644 p = &(*p)->rb_left;
645 } else if (inode > scan->inode) {
646 p = &(*p)->rb_right;
647 } else {
648 kfree(entry);
649 return -EEXIST;
650 }
651 }
652
653 rb_link_node(&entry->node, parent, p);
654 rb_insert_color(&entry->node, &dev->xrcd_tree);
655 igrab(inode);
656 return 0;
657}
658
659static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
660 struct inode *inode)
661{
662 struct xrcd_table_entry *entry;
663 struct rb_node *p = dev->xrcd_tree.rb_node;
664
665 while (p) {
666 entry = rb_entry(p, struct xrcd_table_entry, node);
667
668 if (inode < entry->inode)
669 p = p->rb_left;
670 else if (inode > entry->inode)
671 p = p->rb_right;
672 else
673 return entry;
674 }
675
676 return NULL;
677}
678
679static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
680{
681 struct xrcd_table_entry *entry;
682
683 entry = xrcd_table_search(dev, inode);
684 if (!entry)
685 return NULL;
686
687 return entry->xrcd;
688}
689
690static void xrcd_table_delete(struct ib_uverbs_device *dev,
691 struct inode *inode)
692{
693 struct xrcd_table_entry *entry;
694
695 entry = xrcd_table_search(dev, inode);
696 if (entry) {
697 iput(inode);
698 rb_erase(&entry->node, &dev->xrcd_tree);
699 kfree(entry);
700 }
701}
702
/*
 * OPEN_XRCD: open (or create) an XRC domain.  If cmd.fd names a file,
 * the XRCD is shared across processes keyed by that file's inode in the
 * per-device rbtree; cmd.fd == -1 means a private, unshared XRCD.
 * O_CREAT/O_EXCL in cmd.oflags follow open(2) semantics against the
 * existing-mapping check.  xrcd->usecnt counts sharers of an
 * inode-backed XRCD.
 */
ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_open_xrcd cmd;
	struct ib_uverbs_open_xrcd_resp resp;
	struct ib_udata udata;
	struct ib_uxrcd_object *obj;
	struct ib_xrcd *xrcd = NULL;
	struct fd f = {NULL, 0};
	struct inode *inode = NULL;
	int ret = 0;
	int new_xrcd = 0;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	/* Serializes all lookups/inserts into the inode->xrcd table. */
	mutex_lock(&file->device->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* Shared XRCD: key on the inode behind the user's fd. */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = file_inode(f.file);
		xrcd = find_xrcd(file->device, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* No such XRCD and the user didn't ask to create one. */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj) {
		ret = -ENOMEM;
		goto err_tree_mutex_unlock;
	}

	init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);

	down_write(&obj->uobject.mutex);

	if (!xrcd) {
		/* Not found (or private): allocate a fresh XRCD. */
		xrcd = file->device->ib_dev->alloc_xrcd(file->device->ib_dev,
							file->ucontext, &udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode = inode;
		xrcd->device = file->device->ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* First opener: publish inode->xrcd in the rbtree. */
			ret = xrcd_table_insert(file->device, inode, xrcd);
			if (ret)
				goto err_insert_xrcd;
		}
		atomic_inc(&xrcd->usecnt);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (f.file)
		fdput(f);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;
	up_write(&obj->uobject.mutex);

	mutex_unlock(&file->device->xrcd_tree_mutex);
	return in_len;

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(file->device, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_insert_xrcd:
	idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);

err_idr:
	ib_dealloc_xrcd(xrcd);

err:
	put_uobj_write(&obj->uobject);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	return ret;
}
839
/*
 * CLOSE_XRCD: drop this process's handle on an XRCD.  Fails with -EBUSY
 * while QPs/SRQs still reference it (obj->refcnt).  For inode-backed
 * XRCDs the underlying object is only destroyed when the last sharer
 * closes (usecnt hits zero), at which point the inode mapping is also
 * removed from the rbtree.
 */
ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	struct ib_uobject *uobj;
	struct ib_xrcd *xrcd = NULL;
	struct inode *inode = NULL;
	struct ib_uxrcd_object *obj;
	int live;
	int ret = 0;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->device->xrcd_tree_mutex);
	uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
	if (!uobj) {
		ret = -EINVAL;
		goto out;
	}

	xrcd = uobj->object;
	inode = xrcd->inode;
	obj = container_of(uobj, struct ib_uxrcd_object, uobject);
	if (atomic_read(&obj->refcnt)) {
		/* Still referenced by XRC QPs/SRQs of this process. */
		put_uobj_write(uobj);
		ret = -EBUSY;
		goto out;
	}

	/* Private XRCD, or last sharer of an inode-backed one: destroy it. */
	if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
		ret = ib_dealloc_xrcd(uobj->object);
		if (!ret)
			uobj->live = 0;
	}

	live = uobj->live;
	/* Destruction failed: restore the sharer count we dropped above. */
	if (inode && ret)
		atomic_inc(&xrcd->usecnt);

	put_uobj_write(uobj);

	if (ret)
		goto out;

	if (inode && !live)
		xrcd_table_delete(file->device, inode);

	idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);
	ret = in_len;

out:
	mutex_unlock(&file->device->xrcd_tree_mutex);
	return ret;
}
901
/*
 * Drop one sharer of @xrcd during cleanup (e.g. file teardown).  For
 * inode-backed XRCDs nothing happens until the last sharer; then the
 * XRCD is destroyed and its inode mapping removed.  Caller is expected
 * to hold dev->xrcd_tree_mutex — TODO confirm against callers.
 */
void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
			    struct ib_xrcd *xrcd)
{
	struct inode *inode;

	inode = xrcd->inode;
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return;

	ib_dealloc_xrcd(xrcd);

	if (inode)
		xrcd_table_delete(dev, inode);
}
916
/*
 * REG_MR: register a user memory region on a PD.  Validates that the
 * virtual address and HCA address share the same page offset, and that
 * remote write/atomic access also requests local write (required so the
 * HCA can be granted write access to the pages).  The PD is held
 * read-locked across the driver call so it cannot be destroyed underneath.
 */
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mr *mr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	/* start and hca_va must agree within the page. */
	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	/*
	 * Local write permission is required if remote write or
	 * remote atomic permission is also requested.
	 */
	if (cmd.access_flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
	    !(cmd.access_flags & IB_ACCESS_LOCAL_WRITE))
		return -EINVAL;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device  = pd->device;
	mr->pd      = pd;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);
	atomic_set(&mr->usecnt, 0);

	uobj->object = mr;
	ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
	if (ret)
		goto err_unreg;

	memset(&resp, 0, sizeof resp);
	resp.lkey      = mr->lkey;
	resp.rkey      = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mr_list);
	mutex_unlock(&file->mutex);

	/* Publish: idr lookups may now return this MR. */
	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

err_unreg:
	ib_dereg_mr(mr);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}
1017
/*
 * DEREG_MR: deregister a memory region.  Standard destroy pattern:
 * write-lock the uobject, deregister, clear live on success, then
 * remove from idr/context list and drop the last reference.
 */
ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_mr *mr;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	ret = ib_dereg_mr(mr);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}
1055
/*
 * ALLOC_MW: allocate a memory window of cmd.mw_type on a PD.  Same
 * creation pattern as ALLOC_PD/REG_MR: build under the uobject write
 * lock, publish handle to user, link into context, mark live.
 */
ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_alloc_mw cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mw *mw;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	mw = pd->device->alloc_mw(pd, cmd.mw_type);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device  = pd->device;
	mw->pd      = pd;
	mw->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mw;
	ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
	if (ret)
		goto err_unalloc;

	memset(&resp, 0, sizeof(resp));
	resp.rkey      = mw->rkey;
	resp.mw_handle = uobj->id;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mw_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

err_unalloc:
	ib_dealloc_mw(mw);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}
1137
/*
 * DEALLOC_MW: destroy a memory window.  Same destroy pattern as
 * DEREG_MR: write-lock, destroy, clear live, unlink, drop last ref.
 */
ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_dealloc_mw cmd;
	struct ib_mw *mw;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mw = uobj->object;

	ret = ib_dealloc_mw(mw);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}
1175
/*
 * CREATE_COMP_CHANNEL: allocate a completion event file and hand its fd
 * to userspace.  fd_install() is deliberately last — once installed the
 * fd cannot be revoked, so the response copy must already have succeeded.
 */
ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel	   cmd;
	struct ib_uverbs_create_comp_channel_resp  resp;
	struct file				  *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		return ret;
	resp.fd = ret;

	filp = ib_uverbs_alloc_event_file(file, 0);
	if (IS_ERR(filp)) {
		put_unused_fd(resp.fd);
		return PTR_ERR(filp);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		put_unused_fd(resp.fd);
		fput(filp);
		return -EFAULT;
	}

	fd_install(resp.fd, filp);
	return in_len;
}
1212
/*
 * CREATE_CQ: create a completion queue, optionally bound to a
 * completion channel (cmd.comp_channel >= 0).  Follows the standard
 * creation pattern; the driver may return fewer/more CQEs than asked,
 * so resp.cqe reports the actual value.
 */
ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq cmd;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata udata;
	struct ib_ucq_object *obj;
	struct ib_uverbs_event_file *ev_file = NULL;
	struct ib_cq *cq;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if (cmd.comp_vector >= file->device->num_comp_vectors)
		return -EINVAL;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_class);
	down_write(&obj->uobject.mutex);

	if (cmd.comp_channel >= 0) {
		/* Takes a reference on the event file; released on error below. */
		ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);
		if (!ev_file) {
			ret = -EINVAL;
			goto err;
		}
	}

	obj->uverbs_file	   = file;
	obj->comp_events_reported  = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
					     cmd.comp_vector,
					     file->ucontext, &udata);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device        = file->device->ib_dev;
	cq->uobject       = &obj->uobject;
	cq->comp_handler  = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context    = ev_file;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
	if (ret)
		goto err_free;

	memset(&resp, 0, sizeof resp);
	resp.cq_handle = obj->uobject.id;
	resp.cqe       = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;

	up_write(&obj->uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);

err_free:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	put_uobj_write(&obj->uobject);
	return ret;
}
1313
/*
 * RESIZE_CQ: ask the driver to resize a CQ and report the actual new
 * CQE count.  Note the copy_to_user deliberately writes only
 * sizeof resp.cqe bytes — the cqe field at the start of the response —
 * leaving the rest of the user's response buffer untouched.
 */
ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq cmd;
	struct ib_uverbs_resize_cq_resp resp;
	struct ib_udata udata;
	struct ib_cq *cq;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	put_cq_read(cq);

	return ret ? ret : in_len;
}
1350
1351static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
1352{
1353 struct ib_uverbs_wc tmp;
1354
1355 tmp.wr_id = wc->wr_id;
1356 tmp.status = wc->status;
1357 tmp.opcode = wc->opcode;
1358 tmp.vendor_err = wc->vendor_err;
1359 tmp.byte_len = wc->byte_len;
1360 tmp.ex.imm_data = (__u32 __force) wc->ex.imm_data;
1361 tmp.qp_num = wc->qp->qp_num;
1362 tmp.src_qp = wc->src_qp;
1363 tmp.wc_flags = wc->wc_flags;
1364 tmp.pkey_index = wc->pkey_index;
1365 tmp.slid = wc->slid;
1366 tmp.sl = wc->sl;
1367 tmp.dlid_path_bits = wc->dlid_path_bits;
1368 tmp.port_num = wc->port_num;
1369 tmp.reserved = 0;
1370
1371 if (copy_to_user(dest, &tmp, sizeof tmp))
1372 return -EFAULT;
1373
1374 return 0;
1375}
1376
/*
 * Handle IB_USER_VERBS_CMD_POLL_CQ: poll up to cmd.ne completions and
 * copy them to the user response buffer.
 *
 * The response layout is a header (resp, holding the count) followed
 * immediately by an array of ib_uverbs_wc.  Completions are copied out
 * one at a time as they are polled; the header is written last, once
 * the final count is known.
 */
ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq cmd;
	struct ib_uverbs_poll_cq_resp resp;
	u8 __user *header_ptr;
	u8 __user *data_ptr;
	struct ib_cq *cq;
	struct ib_wc wc;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;


	/* WC array starts right after the fixed-size response header */
	header_ptr = (void __user *)(unsigned long) cmd.response;
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		/* poll one completion at a time into the stack wc */
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;	/* CQ drained before reaching cmd.ne */

		ret = copy_wc_to_user(data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	put_cq_read(cq);
	return ret;
}
1427
1428ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
1429 const char __user *buf, int in_len,
1430 int out_len)
1431{
1432 struct ib_uverbs_req_notify_cq cmd;
1433 struct ib_cq *cq;
1434
1435 if (copy_from_user(&cmd, buf, sizeof cmd))
1436 return -EFAULT;
1437
1438 cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
1439 if (!cq)
1440 return -EINVAL;
1441
1442 ib_req_notify_cq(cq, cmd.solicited_only ?
1443 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
1444
1445 put_cq_read(cq);
1446
1447 return in_len;
1448}
1449
/*
 * Handle IB_USER_VERBS_CMD_DESTROY_CQ: destroy a CQ and report the
 * number of completion/async events already delivered for it, so
 * userspace can drain exactly that many from its event queues.
 */
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject *uobj;
	struct ib_cq *cq;
	struct ib_ucq_object *obj;
	struct ib_uverbs_event_file *ev_file;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* returns the uobject with its rwsem held for write */
	uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	cq = uobj->object;
	ev_file = cq->cq_context;
	obj = container_of(cq->uobject, struct ib_ucq_object, uobject);

	ret = ib_destroy_cq(cq);
	if (!ret)
		uobj->live = 0;	/* mark dead while the write lock is held */

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_cq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	/* flush events still queued for this CQ before reading the counts */
	ib_uverbs_release_ucq(file, ev_file, obj);

	memset(&resp, 0, sizeof resp);
	resp.comp_events_reported = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	/* drop the final reference; uobj/obj must not be touched after this */
	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
1501
/*
 * Handle IB_USER_VERBS_CMD_CREATE_QP: create a QP of any type and
 * register it in the QP idr.
 *
 * Resource acquisition depends on the QP type:
 *  - IB_QPT_XRC_TGT: cmd.pd_handle actually carries an XRCD handle;
 *    no PD/CQ/SRQ is taken and the QP is created via ib_create_qp().
 *  - IB_QPT_XRC_INI: has no receive side, so recv_wr/recv_sge are
 *    forced to zero and no SRQ/recv CQ is looked up.
 *  - all other types: PD, send CQ, optionally a distinct recv CQ and
 *    an SRQ are read-locked, and the driver's create_qp is called
 *    directly with the user data area.
 *
 * Returns in_len on success or a negative errno; on error every
 * acquired reference is dropped in reverse order via the goto chain.
 */
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata udata;
	struct ib_uqp_object *obj;
	struct ib_device *device;
	struct ib_pd *pd = NULL;
	struct ib_xrcd *xrcd = NULL;
	struct ib_uobject *uninitialized_var(xrcd_uobj);
	struct ib_cq *scq = NULL, *rcq = NULL;
	struct ib_srq *srq = NULL;
	struct ib_qp *qp;
	struct ib_qp_init_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* raw packet QPs can observe/inject arbitrary traffic */
	if (cmd.qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		return -EPERM;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kzalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
	/* hold the uobject write-locked until it goes live at the end */
	down_write(&obj->uevent.uobject.mutex);

	if (cmd.qp_type == IB_QPT_XRC_TGT) {
		/* for XRC_TGT, pd_handle carries the XRCD handle */
		xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd.qp_type == IB_QPT_XRC_INI) {
			/* XRC initiator QPs have no receive queue */
			cmd.max_recv_wr = cmd.max_recv_sge = 0;
		} else {
			if (cmd.is_srq) {
				srq = idr_read_srq(cmd.srq_handle, file->ucontext);
				if (!srq || srq->srq_type != IB_SRQT_BASIC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (cmd.recv_cq_handle != cmd.send_cq_handle) {
				rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext, 0);
				if (!rcq) {
					ret = -EINVAL;
					goto err_put;
				}
			}
		}

		/* if send and recv CQ are the same object, take it once */
		scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, !!rcq);
		rcq = rcq ?: scq;
		pd = idr_read_pd(cmd.pd_handle, file->ucontext);
		if (!pd || !scq) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.send_cq = scq;
	attr.recv_cq = rcq;
	attr.srq = srq;
	attr.xrcd = xrcd;
	attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
	attr.qp_type = cmd.qp_type;
	attr.create_flags = 0;

	attr.cap.max_send_wr = cmd.max_send_wr;
	attr.cap.max_recv_wr = cmd.max_recv_wr;
	attr.cap.max_send_sge = cmd.max_send_sge;
	attr.cap.max_recv_sge = cmd.max_recv_sge;
	attr.cap.max_inline_data = cmd.max_inline_data;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	if (cmd.qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = device->create_qp(pd, &attr, &udata);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd.qp_type != IB_QPT_XRC_TGT) {
		/*
		 * The driver's create_qp only fills provider state;
		 * initialize the common ib_qp fields and refcounts here
		 * (ib_create_qp does this itself for the XRC_TGT path).
		 */
		qp->real_qp = qp;
		qp->device = device;
		qp->pd = pd;
		qp->send_cq = attr.send_cq;
		qp->recv_cq = attr.recv_cq;
		qp->srq = attr.srq;
		qp->event_handler = attr.event_handler;
		qp->qp_context = attr.qp_context;
		qp->qp_type = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
	}
	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;
	resp.max_recv_sge = attr.cap.max_recv_sge;
	resp.max_send_sge = attr.cap.max_send_sge;
	resp.max_recv_wr = attr.cap.max_recv_wr;
	resp.max_send_wr = attr.cap.max_send_wr;
	resp.max_inline_data = attr.cap.max_inline_data;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (xrcd) {
		/* pin the XRCD uobject for the lifetime of this QP */
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		put_xrcd_read(xrcd_uobj);
	}

	/* success: drop the lookup references taken above */
	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	/* publish only after everything is set up, then release the lock */
	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	if (xrcd)
		put_xrcd_read(xrcd_uobj);
	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	/* drops the write lock and the last reference, freeing obj */
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
1696
1697ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
1698 const char __user *buf, int in_len, int out_len)
1699{
1700 struct ib_uverbs_open_qp cmd;
1701 struct ib_uverbs_create_qp_resp resp;
1702 struct ib_udata udata;
1703 struct ib_uqp_object *obj;
1704 struct ib_xrcd *xrcd;
1705 struct ib_uobject *uninitialized_var(xrcd_uobj);
1706 struct ib_qp *qp;
1707 struct ib_qp_open_attr attr;
1708 int ret;
1709
1710 if (out_len < sizeof resp)
1711 return -ENOSPC;
1712
1713 if (copy_from_user(&cmd, buf, sizeof cmd))
1714 return -EFAULT;
1715
1716 INIT_UDATA(&udata, buf + sizeof cmd,
1717 (unsigned long) cmd.response + sizeof resp,
1718 in_len - sizeof cmd, out_len - sizeof resp);
1719
1720 obj = kmalloc(sizeof *obj, GFP_KERNEL);
1721 if (!obj)
1722 return -ENOMEM;
1723
1724 init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
1725 down_write(&obj->uevent.uobject.mutex);
1726
1727 xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
1728 if (!xrcd) {
1729 ret = -EINVAL;
1730 goto err_put;
1731 }
1732
1733 attr.event_handler = ib_uverbs_qp_event_handler;
1734 attr.qp_context = file;
1735 attr.qp_num = cmd.qpn;
1736 attr.qp_type = cmd.qp_type;
1737
1738 obj->uevent.events_reported = 0;
1739 INIT_LIST_HEAD(&obj->uevent.event_list);
1740 INIT_LIST_HEAD(&obj->mcast_list);
1741
1742 qp = ib_open_qp(xrcd, &attr);
1743 if (IS_ERR(qp)) {
1744 ret = PTR_ERR(qp);
1745 goto err_put;
1746 }
1747
1748 qp->uobject = &obj->uevent.uobject;
1749
1750 obj->uevent.uobject.object = qp;
1751 ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1752 if (ret)
1753 goto err_destroy;
1754
1755 memset(&resp, 0, sizeof resp);
1756 resp.qpn = qp->qp_num;
1757 resp.qp_handle = obj->uevent.uobject.id;
1758
1759 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1760 &resp, sizeof resp)) {
1761 ret = -EFAULT;
1762 goto err_remove;
1763 }
1764
1765 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
1766 atomic_inc(&obj->uxrcd->refcnt);
1767 put_xrcd_read(xrcd_uobj);
1768
1769 mutex_lock(&file->mutex);
1770 list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
1771 mutex_unlock(&file->mutex);
1772
1773 obj->uevent.uobject.live = 1;
1774
1775 up_write(&obj->uevent.uobject.mutex);
1776
1777 return in_len;
1778
1779err_remove:
1780 idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1781
1782err_destroy:
1783 ib_destroy_qp(qp);
1784
1785err_put:
1786 put_xrcd_read(xrcd_uobj);
1787 put_uobj_write(&obj->uevent.uobject);
1788 return ret;
1789}
1790
/*
 * Handle IB_USER_VERBS_CMD_QUERY_QP: query QP attributes and marshal
 * them into the userspace response layout.
 *
 * attr and init_attr are heap-allocated because together they are too
 * large to put on the kernel stack.  Returns in_len on success or a
 * negative errno.
 */
ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_query_qp cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp *qp;
	struct ib_qp_attr *attr;
	struct ib_qp_init_attr *init_attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;	/* kfree(NULL) below is a no-op */
	}

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	/* release the QP before the (fault-prone) user copy */
	put_qp_read(qp);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof resp);

	resp.qp_state = attr->qp_state;
	resp.cur_qp_state = attr->cur_qp_state;
	resp.path_mtu = attr->path_mtu;
	resp.path_mig_state = attr->path_mig_state;
	resp.qkey = attr->qkey;
	resp.rq_psn = attr->rq_psn;
	resp.sq_psn = attr->sq_psn;
	resp.dest_qp_num = attr->dest_qp_num;
	resp.qp_access_flags = attr->qp_access_flags;
	resp.pkey_index = attr->pkey_index;
	resp.alt_pkey_index = attr->alt_pkey_index;
	resp.sq_draining = attr->sq_draining;
	resp.max_rd_atomic = attr->max_rd_atomic;
	resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
	resp.min_rnr_timer = attr->min_rnr_timer;
	resp.port_num = attr->port_num;
	resp.timeout = attr->timeout;
	resp.retry_cnt = attr->retry_cnt;
	resp.rnr_retry = attr->rnr_retry;
	resp.alt_port_num = attr->alt_port_num;
	resp.alt_timeout = attr->alt_timeout;

	/* primary path address vector */
	memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
	resp.dest.flow_label = attr->ah_attr.grh.flow_label;
	resp.dest.sgid_index = attr->ah_attr.grh.sgid_index;
	resp.dest.hop_limit = attr->ah_attr.grh.hop_limit;
	resp.dest.traffic_class = attr->ah_attr.grh.traffic_class;
	resp.dest.dlid = attr->ah_attr.dlid;
	resp.dest.sl = attr->ah_attr.sl;
	resp.dest.src_path_bits = attr->ah_attr.src_path_bits;
	resp.dest.static_rate = attr->ah_attr.static_rate;
	resp.dest.is_global = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
	resp.dest.port_num = attr->ah_attr.port_num;

	/* alternate path address vector */
	memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
	resp.alt_dest.flow_label = attr->alt_ah_attr.grh.flow_label;
	resp.alt_dest.sgid_index = attr->alt_ah_attr.grh.sgid_index;
	resp.alt_dest.hop_limit = attr->alt_ah_attr.grh.hop_limit;
	resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
	resp.alt_dest.dlid = attr->alt_ah_attr.dlid;
	resp.alt_dest.sl = attr->alt_ah_attr.sl;
	resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
	resp.alt_dest.static_rate = attr->alt_ah_attr.static_rate;
	resp.alt_dest.is_global = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
	resp.alt_dest.port_num = attr->alt_ah_attr.port_num;

	resp.max_send_wr = init_attr->cap.max_send_wr;
	resp.max_recv_wr = init_attr->cap.max_recv_wr;
	resp.max_send_sge = init_attr->cap.max_send_sge;
	resp.max_recv_sge = init_attr->cap.max_recv_sge;
	resp.max_inline_data = init_attr->cap.max_inline_data;
	resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	kfree(attr);
	kfree(init_attr);

	return ret ? ret : in_len;
}
1890
1891
1892static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
1893{
1894 switch (qp_type) {
1895 case IB_QPT_XRC_INI:
1896 return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
1897 case IB_QPT_XRC_TGT:
1898 return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
1899 IB_QP_RNR_RETRY);
1900 default:
1901 return mask;
1902 }
1903}
1904
/*
 * Handle IB_USER_VERBS_CMD_MODIFY_QP: unmarshal the attribute block
 * from userspace and apply it to the QP.
 *
 * A QP opened via ib_uverbs_open_qp is a shadow of a real QP
 * (qp->real_qp != qp); for those the modify must go through
 * ib_modify_qp() on the shared QP rather than directly into the
 * driver.  Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_modify_qp cmd;
	struct ib_udata udata;
	struct ib_qp *qp;
	struct ib_qp_attr *attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* no response struct for modify; whole out buffer is driver data */
	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	/* attr is too large for the kernel stack */
	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	attr->qp_state = cmd.qp_state;
	attr->cur_qp_state = cmd.cur_qp_state;
	attr->path_mtu = cmd.path_mtu;
	attr->path_mig_state = cmd.path_mig_state;
	attr->qkey = cmd.qkey;
	attr->rq_psn = cmd.rq_psn;
	attr->sq_psn = cmd.sq_psn;
	attr->dest_qp_num = cmd.dest_qp_num;
	attr->qp_access_flags = cmd.qp_access_flags;
	attr->pkey_index = cmd.pkey_index;
	attr->alt_pkey_index = cmd.alt_pkey_index;
	attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
	attr->max_rd_atomic = cmd.max_rd_atomic;
	attr->max_dest_rd_atomic = cmd.max_dest_rd_atomic;
	attr->min_rnr_timer = cmd.min_rnr_timer;
	attr->port_num = cmd.port_num;
	attr->timeout = cmd.timeout;
	attr->retry_cnt = cmd.retry_cnt;
	attr->rnr_retry = cmd.rnr_retry;
	attr->alt_port_num = cmd.alt_port_num;
	attr->alt_timeout = cmd.alt_timeout;

	/* primary path address vector */
	memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
	attr->ah_attr.grh.flow_label = cmd.dest.flow_label;
	attr->ah_attr.grh.sgid_index = cmd.dest.sgid_index;
	attr->ah_attr.grh.hop_limit = cmd.dest.hop_limit;
	attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class;
	attr->ah_attr.dlid = cmd.dest.dlid;
	attr->ah_attr.sl = cmd.dest.sl;
	attr->ah_attr.src_path_bits = cmd.dest.src_path_bits;
	attr->ah_attr.static_rate = cmd.dest.static_rate;
	attr->ah_attr.ah_flags = cmd.dest.is_global ? IB_AH_GRH : 0;
	attr->ah_attr.port_num = cmd.dest.port_num;

	/* alternate path address vector */
	memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
	attr->alt_ah_attr.grh.flow_label = cmd.alt_dest.flow_label;
	attr->alt_ah_attr.grh.sgid_index = cmd.alt_dest.sgid_index;
	attr->alt_ah_attr.grh.hop_limit = cmd.alt_dest.hop_limit;
	attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
	attr->alt_ah_attr.dlid = cmd.alt_dest.dlid;
	attr->alt_ah_attr.sl = cmd.alt_dest.sl;
	attr->alt_ah_attr.src_path_bits = cmd.alt_dest.src_path_bits;
	attr->alt_ah_attr.static_rate = cmd.alt_dest.static_rate;
	attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
	attr->alt_ah_attr.port_num = cmd.alt_dest.port_num;

	if (qp->real_qp == qp) {
		ret = qp->device->modify_qp(qp, attr,
			modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
	} else {
		ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
	}

	put_qp_read(qp);

	if (ret)
		goto out;

	ret = in_len;

out:
	kfree(attr);

	return ret;
}
1996
/*
 * Handle IB_USER_VERBS_CMD_DESTROY_QP: destroy a QP and report the
 * number of async events already delivered for it.
 *
 * Fails with -EBUSY while multicast groups are still attached; the
 * caller must detach them first.
 */
ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject *uobj;
	struct ib_qp *qp;
	struct ib_uqp_object *obj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	/* returns the uobject with its rwsem held for write */
	uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	qp = uobj->object;
	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);

	if (!list_empty(&obj->mcast_list)) {
		put_uobj_write(uobj);
		return -EBUSY;
	}

	ret = ib_destroy_qp(qp);
	if (!ret)
		uobj->live = 0;	/* mark dead while the write lock is held */

	put_uobj_write(uobj);

	if (ret)
		return ret;

	/* drop the XRCD pin taken at create/open time, if any */
	if (obj->uxrcd)
		atomic_dec(&obj->uxrcd->refcnt);

	idr_remove_uobj(&ib_uverbs_qp_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	/* flush queued async events before reading the count */
	ib_uverbs_release_uevent(file, &obj->uevent);

	resp.events_reported = obj->uevent.events_reported;

	/* drop the final reference; uobj/obj must not be touched after this */
	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
2054
/*
 * Handle IB_USER_VERBS_CMD_POST_SEND: unmarshal a chain of send work
 * requests (plus their scatter/gather lists) from userspace and post
 * them to the QP.
 *
 * Wire layout after the command header: wr_count WQEs of wqe_size
 * bytes each, followed by sge_count ib_uverbs_sge entries shared by
 * all WQEs in order.  On a posting failure, resp.bad_wr reports the
 * 1-based index of the WR that failed.
 */
ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_send cmd;
	struct ib_uverbs_post_send_resp resp;
	struct ib_uverbs_send_wr *user_wr;
	struct ib_send_wr *wr = NULL, *last, *next, *bad_wr;
	struct ib_qp *qp;
	int i, sg_ind;
	int is_ud;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* the declared counts must fit inside the request buffer */
	if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
	    cmd.sge_count * sizeof (struct ib_uverbs_sge))
		return -EINVAL;

	if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
		return -EINVAL;

	/* scratch buffer reused for each user WQE */
	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
	if (!user_wr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	is_ud = qp->qp_type == IB_QPT_UD;
	sg_ind = 0;
	last = NULL;
	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr,
				   buf + sizeof cmd + i * cmd.wqe_size,
				   cmd.wqe_size)) {
			ret = -EFAULT;
			goto out_put;
		}

		/* each WR consumes its num_sge entries from the shared pool */
		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
			ret = -EINVAL;
			goto out_put;
		}

		/* one allocation holds the WR and its aligned SG array */
		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto out_put;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next = NULL;
		next->wr_id = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;
		next->opcode = user_wr->opcode;
		next->send_flags = user_wr->send_flags;

		if (is_ud) {
			/* UD WRs carry an AH handle instead of RDMA info */
			next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
						     file->ucontext);
			if (!next->wr.ud.ah) {
				ret = -EINVAL;
				goto out_put;
			}
			next->wr.ud.remote_qpn = user_wr->wr.ud.remote_qpn;
			next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
		} else {
			switch (next->opcode) {
			case IB_WR_RDMA_WRITE_WITH_IMM:
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
				/* fallthrough: also needs the rdma fields */
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_READ:
				next->wr.rdma.remote_addr =
					user_wr->wr.rdma.remote_addr;
				next->wr.rdma.rkey =
					user_wr->wr.rdma.rkey;
				break;
			case IB_WR_SEND_WITH_IMM:
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
				break;
			case IB_WR_SEND_WITH_INV:
				next->ex.invalidate_rkey =
					user_wr->ex.invalidate_rkey;
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				next->wr.atomic.remote_addr =
					user_wr->wr.atomic.remote_addr;
				next->wr.atomic.compare_add =
					user_wr->wr.atomic.compare_add;
				next->wr.atomic.swap = user_wr->wr.atomic.swap;
				next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
				break;
			default:
				break;
			}
		}

		if (next->num_sge) {
			/* SG array lives just past the WR in the same block */
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + sizeof cmd +
					   cmd.wr_count * cmd.wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto out_put;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	resp.bad_wr = 0;
	ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
	if (ret)
		/* report how far posting got: 1-based index of bad_wr */
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out_put:
	put_qp_read(qp);

	/* free the chain, dropping AH references taken for UD WRs */
	while (wr) {
		if (is_ud && wr->wr.ud.ah)
			put_ah_read(wr->wr.ud.ah);
		next = wr->next;
		kfree(wr);
		wr = next;
	}

out:
	kfree(user_wr);

	return ret ? ret : in_len;
}
2210
/*
 * Unmarshal a chain of receive work requests from userspace.
 *
 * @buf points just past the command header; layout is wr_count WQEs of
 * wqe_size bytes followed by sge_count ib_uverbs_sge entries consumed
 * in order.  Returns the head of a kmalloc'd ib_recv_wr chain (each
 * node holds its own SG array in the same allocation) or an ERR_PTR.
 * The caller owns the chain and must kfree every node.
 */
static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
						    int in_len,
						    u32 wr_count,
						    u32 sge_count,
						    u32 wqe_size)
{
	struct ib_uverbs_recv_wr *user_wr;
	struct ib_recv_wr *wr = NULL, *last, *next;
	int sg_ind;
	int i;
	int ret;

	/* the declared counts must fit inside the request buffer */
	if (in_len < wqe_size * wr_count +
	    sge_count * sizeof (struct ib_uverbs_sge))
		return ERR_PTR(-EINVAL);

	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
		return ERR_PTR(-EINVAL);

	/* scratch buffer reused for each user WQE */
	user_wr = kmalloc(wqe_size, GFP_KERNEL);
	if (!user_wr)
		return ERR_PTR(-ENOMEM);

	sg_ind = 0;
	last = NULL;
	for (i = 0; i < wr_count; ++i) {
		if (copy_from_user(user_wr, buf + i * wqe_size,
				   wqe_size)) {
			ret = -EFAULT;
			goto err;
		}

		/* each WR consumes its num_sge entries from the shared pool */
		if (user_wr->num_sge + sg_ind > sge_count) {
			ret = -EINVAL;
			goto err;
		}

		/* one allocation holds the WR and its aligned SG array */
		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto err;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next = NULL;
		next->wr_id = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;

		if (next->num_sge) {
			/* SG array lives just past the WR in the same block */
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + wr_count * wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto err;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	kfree(user_wr);
	return wr;

err:
	kfree(user_wr);

	/* unwind any partially built chain */
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ERR_PTR(ret);
}
2295
/*
 * Handle IB_USER_VERBS_CMD_POST_RECV: unmarshal and post a chain of
 * receive work requests to the QP's receive queue.  On a posting
 * failure, resp.bad_wr reports the 1-based index of the WR that
 * failed.
 */
ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_recv cmd;
	struct ib_uverbs_post_recv_resp resp;
	struct ib_recv_wr *wr, *next, *bad_wr;
	struct ib_qp *qp;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	resp.bad_wr = 0;
	ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);

	put_qp_read(qp);

	if (ret)
		/* report how far posting got: 1-based index of bad_wr */
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	/* the unmarshalled chain is always owned (and freed) here */
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}
2344
/*
 * Handle IB_USER_VERBS_CMD_POST_SRQ_RECV: unmarshal and post a chain
 * of receive work requests to a shared receive queue.  Mirrors
 * ib_uverbs_post_recv but targets an SRQ instead of a QP.
 */
ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_post_srq_recv cmd;
	struct ib_uverbs_post_srq_recv_resp resp;
	struct ib_recv_wr *wr, *next, *bad_wr;
	struct ib_srq *srq;
	ssize_t ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		goto out;

	resp.bad_wr = 0;
	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);

	put_srq_read(srq);

	if (ret)
		/* report how far posting got: 1-based index of bad_wr */
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	/* the unmarshalled chain is always owned (and freed) here */
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}
2393
/*
 * Handle IB_USER_VERBS_CMD_CREATE_AH: create an address handle on the
 * given PD from the user-supplied address vector and register it in
 * the AH idr.  Returns in_len on success or a negative errno.
 */
ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_ah cmd;
	struct ib_uverbs_create_ah_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_ah *ah;
	struct ib_ah_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class);
	/* hold the uobject write-locked until it goes live at the end */
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	/* translate the userspace address vector into kernel form */
	attr.dlid = cmd.attr.dlid;
	attr.sl = cmd.attr.sl;
	attr.src_path_bits = cmd.attr.src_path_bits;
	attr.static_rate = cmd.attr.static_rate;
	attr.ah_flags = cmd.attr.is_global ? IB_AH_GRH : 0;
	attr.port_num = cmd.attr.port_num;
	attr.grh.flow_label = cmd.attr.grh.flow_label;
	attr.grh.sgid_index = cmd.attr.grh.sgid_index;
	attr.grh.hop_limit = cmd.attr.grh.hop_limit;
	attr.grh.traffic_class = cmd.attr.grh.traffic_class;
	memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);

	ah = ib_create_ah(pd, &attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->uobject = uobj;
	uobj->object = ah;

	ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
	if (ret)
		goto err_destroy;

	resp.ah_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->ah_list);
	mutex_unlock(&file->mutex);

	/* publish only after everything is set up, then release the lock */
	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

err_destroy:
	ib_destroy_ah(ah);

err_put:
	put_pd_read(pd);

err:
	/* drops the write lock and the last reference, freeing uobj */
	put_uobj_write(uobj);
	return ret;
}
2483
/*
 * Handle IB_USER_VERBS_CMD_DESTROY_AH: destroy an address handle and
 * remove it from the idr and the per-context list.
 */
ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_ah cmd;
	struct ib_ah *ah;
	struct ib_uobject *uobj;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* returns the uobject with its rwsem held for write */
	uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	ah = uobj->object;

	ret = ib_destroy_ah(ah);
	if (!ret)
		uobj->live = 0;	/* mark dead while the write lock is held */

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	/* drop the final reference, freeing uobj */
	put_uobj(uobj);

	return in_len;
}
2519
/*
 * Handle IB_USER_VERBS_CMD_ATTACH_MCAST: attach the QP to a multicast
 * group (gid, mlid) and remember the attachment on the uobject so it
 * can be detached at destroy time.  Attaching the same group twice is
 * a silent no-op.
 */
ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp *qp;
	struct ib_uqp_object *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* write lock: mcast_list is modified below */
	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	/* already attached to this group? treat as success */
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);	/* roll back the tracking entry on failure */

out_put:
	put_qp_write(qp);

	return ret ? ret : in_len;
}
2566
/*
 * Handle IB_USER_VERBS_CMD_DETACH_MCAST: detach the QP from a
 * multicast group and drop the matching tracking entry recorded by
 * ib_uverbs_attach_mcast.  The tracking entry is only removed after
 * the detach succeeds, so a failed detach leaves the state consistent.
 */
ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object *obj;
	struct ib_qp *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* write lock: mcast_list is modified below */
	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
	if (ret)
		goto out_put;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	/* remove the tracking entry added at attach time, if present */
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			break;
		}

out_put:
	put_qp_write(qp);

	return ret ? ret : in_len;
}
2603
2604#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
2605static int kern_spec_to_ib_spec(struct ib_kern_spec *kern_spec,
2606 union ib_flow_spec *ib_spec)
2607{
2608 ib_spec->type = kern_spec->type;
2609
2610 switch (ib_spec->type) {
2611 case IB_FLOW_SPEC_ETH:
2612 ib_spec->eth.size = sizeof(struct ib_flow_spec_eth);
2613 if (ib_spec->eth.size != kern_spec->eth.size)
2614 return -EINVAL;
2615 memcpy(&ib_spec->eth.val, &kern_spec->eth.val,
2616 sizeof(struct ib_flow_eth_filter));
2617 memcpy(&ib_spec->eth.mask, &kern_spec->eth.mask,
2618 sizeof(struct ib_flow_eth_filter));
2619 break;
2620 case IB_FLOW_SPEC_IPV4:
2621 ib_spec->ipv4.size = sizeof(struct ib_flow_spec_ipv4);
2622 if (ib_spec->ipv4.size != kern_spec->ipv4.size)
2623 return -EINVAL;
2624 memcpy(&ib_spec->ipv4.val, &kern_spec->ipv4.val,
2625 sizeof(struct ib_flow_ipv4_filter));
2626 memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask,
2627 sizeof(struct ib_flow_ipv4_filter));
2628 break;
2629 case IB_FLOW_SPEC_TCP:
2630 case IB_FLOW_SPEC_UDP:
2631 ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp);
2632 if (ib_spec->tcp_udp.size != kern_spec->tcp_udp.size)
2633 return -EINVAL;
2634 memcpy(&ib_spec->tcp_udp.val, &kern_spec->tcp_udp.val,
2635 sizeof(struct ib_flow_tcp_udp_filter));
2636 memcpy(&ib_spec->tcp_udp.mask, &kern_spec->tcp_udp.mask,
2637 sizeof(struct ib_flow_tcp_udp_filter));
2638 break;
2639 default:
2640 return -EINVAL;
2641 }
2642 return 0;
2643}
2644
2645ssize_t ib_uverbs_create_flow(struct ib_uverbs_file *file,
2646 const char __user *buf, int in_len,
2647 int out_len)
2648{
2649 struct ib_uverbs_create_flow cmd;
2650 struct ib_uverbs_create_flow_resp resp;
2651 struct ib_uobject *uobj;
2652 struct ib_flow *flow_id;
2653 struct ib_kern_flow_attr *kern_flow_attr;
2654 struct ib_flow_attr *flow_attr;
2655 struct ib_qp *qp;
2656 int err = 0;
2657 void *kern_spec;
2658 void *ib_spec;
2659 int i;
2660 int kern_attr_size;
2661
2662 if (out_len < sizeof(resp))
2663 return -ENOSPC;
2664
2665 if (copy_from_user(&cmd, buf, sizeof(cmd)))
2666 return -EFAULT;
2667
2668 if (cmd.comp_mask)
2669 return -EINVAL;
2670
2671 if ((cmd.flow_attr.type == IB_FLOW_ATTR_SNIFFER &&
2672 !capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW))
2673 return -EPERM;
2674
2675 if (cmd.flow_attr.num_of_specs < 0 ||
2676 cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
2677 return -EINVAL;
2678
2679 kern_attr_size = cmd.flow_attr.size - sizeof(cmd) -
2680 sizeof(struct ib_uverbs_cmd_hdr_ex);
2681
2682 if (cmd.flow_attr.size < 0 || cmd.flow_attr.size > in_len ||
2683 kern_attr_size < 0 || kern_attr_size >
2684 (cmd.flow_attr.num_of_specs * sizeof(struct ib_kern_spec)))
2685 return -EINVAL;
2686
2687 if (cmd.flow_attr.num_of_specs) {
2688 kern_flow_attr = kmalloc(cmd.flow_attr.size, GFP_KERNEL);
2689 if (!kern_flow_attr)
2690 return -ENOMEM;
2691
2692 memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
2693 if (copy_from_user(kern_flow_attr + 1, buf + sizeof(cmd),
2694 kern_attr_size)) {
2695 err = -EFAULT;
2696 goto err_free_attr;
2697 }
2698 } else {
2699 kern_flow_attr = &cmd.flow_attr;
2700 kern_attr_size = sizeof(cmd.flow_attr);
2701 }
2702
2703 uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
2704 if (!uobj) {
2705 err = -ENOMEM;
2706 goto err_free_attr;
2707 }
2708 init_uobj(uobj, 0, file->ucontext, &rule_lock_class);
2709 down_write(&uobj->mutex);
2710
2711 qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2712 if (!qp) {
2713 err = -EINVAL;
2714 goto err_uobj;
2715 }
2716
2717 flow_attr = kmalloc(cmd.flow_attr.size, GFP_KERNEL);
2718 if (!flow_attr) {
2719 err = -ENOMEM;
2720 goto err_put;
2721 }
2722
2723 flow_attr->type = kern_flow_attr->type;
2724 flow_attr->priority = kern_flow_attr->priority;
2725 flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
2726 flow_attr->port = kern_flow_attr->port;
2727 flow_attr->flags = kern_flow_attr->flags;
2728 flow_attr->size = sizeof(*flow_attr);
2729
2730 kern_spec = kern_flow_attr + 1;
2731 ib_spec = flow_attr + 1;
2732 for (i = 0; i < flow_attr->num_of_specs && kern_attr_size > 0; i++) {
2733 err = kern_spec_to_ib_spec(kern_spec, ib_spec);
2734 if (err)
2735 goto err_free;
2736 flow_attr->size +=
2737 ((union ib_flow_spec *) ib_spec)->size;
2738 kern_attr_size -= ((struct ib_kern_spec *) kern_spec)->size;
2739 kern_spec += ((struct ib_kern_spec *) kern_spec)->size;
2740 ib_spec += ((union ib_flow_spec *) ib_spec)->size;
2741 }
2742 if (kern_attr_size) {
2743 pr_warn("create flow failed, %d bytes left from uverb cmd\n",
2744 kern_attr_size);
2745 goto err_free;
2746 }
2747 flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
2748 if (IS_ERR(flow_id)) {
2749 err = PTR_ERR(flow_id);
2750 goto err_free;
2751 }
2752 flow_id->qp = qp;
2753 flow_id->uobject = uobj;
2754 uobj->object = flow_id;
2755
2756 err = idr_add_uobj(&ib_uverbs_rule_idr, uobj);
2757 if (err)
2758 goto destroy_flow;
2759
2760 memset(&resp, 0, sizeof(resp));
2761 resp.flow_handle = uobj->id;
2762
2763 if (copy_to_user((void __user *)(unsigned long) cmd.response,
2764 &resp, sizeof(resp))) {
2765 err = -EFAULT;
2766 goto err_copy;
2767 }
2768
2769 put_qp_read(qp);
2770 mutex_lock(&file->mutex);
2771 list_add_tail(&uobj->list, &file->ucontext->rule_list);
2772 mutex_unlock(&file->mutex);
2773
2774 uobj->live = 1;
2775
2776 up_write(&uobj->mutex);
2777 kfree(flow_attr);
2778 if (cmd.flow_attr.num_of_specs)
2779 kfree(kern_flow_attr);
2780 return in_len;
2781err_copy:
2782 idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
2783destroy_flow:
2784 ib_destroy_flow(flow_id);
2785err_free:
2786 kfree(flow_attr);
2787err_put:
2788 put_qp_read(qp);
2789err_uobj:
2790 put_uobj_write(uobj);
2791err_free_attr:
2792 if (cmd.flow_attr.num_of_specs)
2793 kfree(kern_flow_attr);
2794 return err;
2795}
2796
/*
 * Destroy a flow steering rule and release its userspace object.
 */
ssize_t ib_uverbs_destroy_flow(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len) {
	struct ib_uverbs_destroy_flow	cmd;
	struct ib_flow			*flow_id;
	struct ib_uobject		*uobj;
	int				ret;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	/* Takes the uobject write lock plus a reference. */
	uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
			      file->ucontext);
	if (!uobj)
		return -EINVAL;
	flow_id = uobj->object;

	ret = ib_destroy_flow(flow_id);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	/* NOTE(review): unlike ib_uverbs_destroy_ah(), the uobject is
	 * unlinked and freed below even when ib_destroy_flow() failed,
	 * while the error is still returned to userspace — confirm that
	 * this asymmetry is intended. */
	idr_remove_uobj(&ib_uverbs_rule_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return ret ? ret : in_len;
}
2830#endif
2831
/*
 * Common implementation for create_srq and create_xsrq.
 *
 * Allocates the usrq uobject, looks up the referenced PD (and, for XRC
 * SRQs, the XRCD and CQ), asks the driver to create the SRQ, registers
 * it in the SRQ idr, and copies the response to userspace.  Returns 0 on
 * success or a negative errno; on failure everything acquired so far is
 * unwound via the goto chain at the bottom, in reverse order.
 */
static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
				struct ib_uverbs_create_xsrq *cmd,
				struct ib_udata *udata)
{
	struct ib_uverbs_create_srq_resp resp;
	struct ib_usrq_object           *obj;
	struct ib_pd                    *pd;
	struct ib_srq                   *srq;
	/* Only assigned for IB_SRQT_XRC; uninitialized_var() silences the
	 * false-positive warning on the non-XRC paths. */
	struct ib_uobject               *uninitialized_var(xrcd_uobj);
	struct ib_srq_init_attr          attr;
	int ret;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	if (cmd->srq_type == IB_SRQT_XRC) {
		/* attr.ext.xrc is only valid for XRC SRQs. */
		attr.ext.xrc.xrcd  = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
		if (!attr.ext.xrc.xrcd) {
			ret = -EINVAL;
			goto err;
		}

		/* Pin the XRCD's user object for the lifetime of the SRQ. */
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
		atomic_inc(&obj->uxrcd->refcnt);

		attr.ext.xrc.cq  = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
		if (!attr.ext.xrc.cq) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}
	}

	pd  = idr_read_pd(cmd->pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_put_cq;
	}

	attr.event_handler  = ib_uverbs_srq_event_handler;
	attr.srq_context    = file;
	attr.srq_type       = cmd->srq_type;
	attr.attr.max_wr    = cmd->max_wr;
	attr.attr.max_sge   = cmd->max_sge;
	attr.attr.srq_limit = cmd->srq_limit;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	srq = pd->device->create_srq(pd, &attr, udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}

	/* Fill in the uverbs-owned fields the driver does not set. */
	srq->device        = pd->device;
	srq->pd            = pd;
	srq->srq_type	   = cmd->srq_type;
	srq->uobject       = &obj->uevent.uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context   = attr.srq_context;

	if (cmd->srq_type == IB_SRQT_XRC) {
		srq->ext.xrc.cq   = attr.ext.xrc.cq;
		srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
		atomic_inc(&attr.ext.xrc.cq->usecnt);
		atomic_inc(&attr.ext.xrc.xrcd->usecnt);
	}

	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	obj->uevent.uobject.object = srq;
	ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uevent.uobject.id;
	resp.max_wr     = attr.attr.max_wr;
	resp.max_sge    = attr.attr.max_sge;
	if (cmd->srq_type == IB_SRQT_XRC)
		resp.srqn = srq->ext.xrc.srq_num;

	if (copy_to_user((void __user *) (unsigned long) cmd->response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	/* Success: drop the lookup references taken above (the SRQ itself
	 * now holds usecnt references where needed). */
	if (cmd->srq_type == IB_SRQT_XRC) {
		put_uobj_read(xrcd_uobj);
		put_cq_read(attr.ext.xrc.cq);
	}
	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list);
	mutex_unlock(&file->mutex);

	/* Publish the object only after it is fully set up. */
	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return 0;

err_copy:
	idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_srq(srq);

err_put:
	put_pd_read(pd);

err_put_cq:
	if (cmd->srq_type == IB_SRQT_XRC)
		put_cq_read(attr.ext.xrc.cq);

err_put_xrcd:
	if (cmd->srq_type == IB_SRQT_XRC) {
		atomic_dec(&obj->uxrcd->refcnt);
		put_uobj_read(xrcd_uobj);
	}

err:
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}
2964
2965ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
2966 const char __user *buf, int in_len,
2967 int out_len)
2968{
2969 struct ib_uverbs_create_srq cmd;
2970 struct ib_uverbs_create_xsrq xcmd;
2971 struct ib_uverbs_create_srq_resp resp;
2972 struct ib_udata udata;
2973 int ret;
2974
2975 if (out_len < sizeof resp)
2976 return -ENOSPC;
2977
2978 if (copy_from_user(&cmd, buf, sizeof cmd))
2979 return -EFAULT;
2980
2981 xcmd.response = cmd.response;
2982 xcmd.user_handle = cmd.user_handle;
2983 xcmd.srq_type = IB_SRQT_BASIC;
2984 xcmd.pd_handle = cmd.pd_handle;
2985 xcmd.max_wr = cmd.max_wr;
2986 xcmd.max_sge = cmd.max_sge;
2987 xcmd.srq_limit = cmd.srq_limit;
2988
2989 INIT_UDATA(&udata, buf + sizeof cmd,
2990 (unsigned long) cmd.response + sizeof resp,
2991 in_len - sizeof cmd, out_len - sizeof resp);
2992
2993 ret = __uverbs_create_xsrq(file, &xcmd, &udata);
2994 if (ret)
2995 return ret;
2996
2997 return in_len;
2998}
2999
3000ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
3001 const char __user *buf, int in_len, int out_len)
3002{
3003 struct ib_uverbs_create_xsrq cmd;
3004 struct ib_uverbs_create_srq_resp resp;
3005 struct ib_udata udata;
3006 int ret;
3007
3008 if (out_len < sizeof resp)
3009 return -ENOSPC;
3010
3011 if (copy_from_user(&cmd, buf, sizeof cmd))
3012 return -EFAULT;
3013
3014 INIT_UDATA(&udata, buf + sizeof cmd,
3015 (unsigned long) cmd.response + sizeof resp,
3016 in_len - sizeof cmd, out_len - sizeof resp);
3017
3018 ret = __uverbs_create_xsrq(file, &cmd, &udata);
3019 if (ret)
3020 return ret;
3021
3022 return in_len;
3023}
3024
3025ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
3026 const char __user *buf, int in_len,
3027 int out_len)
3028{
3029 struct ib_uverbs_modify_srq cmd;
3030 struct ib_udata udata;
3031 struct ib_srq *srq;
3032 struct ib_srq_attr attr;
3033 int ret;
3034
3035 if (copy_from_user(&cmd, buf, sizeof cmd))
3036 return -EFAULT;
3037
3038 INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
3039 out_len);
3040
3041 srq = idr_read_srq(cmd.srq_handle, file->ucontext);
3042 if (!srq)
3043 return -EINVAL;
3044
3045 attr.max_wr = cmd.max_wr;
3046 attr.srq_limit = cmd.srq_limit;
3047
3048 ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);
3049
3050 put_srq_read(srq);
3051
3052 return ret ? ret : in_len;
3053}
3054
3055ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
3056 const char __user *buf,
3057 int in_len, int out_len)
3058{
3059 struct ib_uverbs_query_srq cmd;
3060 struct ib_uverbs_query_srq_resp resp;
3061 struct ib_srq_attr attr;
3062 struct ib_srq *srq;
3063 int ret;
3064
3065 if (out_len < sizeof resp)
3066 return -ENOSPC;
3067
3068 if (copy_from_user(&cmd, buf, sizeof cmd))
3069 return -EFAULT;
3070
3071 srq = idr_read_srq(cmd.srq_handle, file->ucontext);
3072 if (!srq)
3073 return -EINVAL;
3074
3075 ret = ib_query_srq(srq, &attr);
3076
3077 put_srq_read(srq);
3078
3079 if (ret)
3080 return ret;
3081
3082 memset(&resp, 0, sizeof resp);
3083
3084 resp.max_wr = attr.max_wr;
3085 resp.max_sge = attr.max_sge;
3086 resp.srq_limit = attr.srq_limit;
3087
3088 if (copy_to_user((void __user *) (unsigned long) cmd.response,
3089 &resp, sizeof resp))
3090 return -EFAULT;
3091
3092 return in_len;
3093}
3094
/*
 * Destroy an SRQ, release its uobject, and report to userspace how many
 * async events were delivered for it.  For XRC SRQs the reference taken
 * on the XRCD uobject at creation time is dropped here.
 */
ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq      cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject		 *uobj;
	struct ib_srq               	 *srq;
	struct ib_uevent_object        	 *obj;
	int                         	  ret = -EINVAL;
	struct ib_usrq_object		 *us;
	enum ib_srq_type		  srq_type;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	srq = uobj->object;
	obj = container_of(uobj, struct ib_uevent_object, uobject);
	/* Cache the type now: srq is freed by ib_destroy_srq() below. */
	srq_type = srq->srq_type;

	ret = ib_destroy_srq(srq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	/* Driver refused: leave the uobject registered and intact. */
	if (ret)
		return ret;

	/* Undo the XRCD uobject pin taken in __uverbs_create_xsrq(). */
	if (srq_type == IB_SRQT_XRC) {
		us = container_of(obj, struct ib_usrq_object, uevent);
		atomic_dec(&us->uxrcd->refcnt);
	}

	idr_remove_uobj(&ib_uverbs_srq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	/* Flush pending events; events_reported is final after this. */
	ib_uverbs_release_uevent(file, obj);

	memset(&resp, 0, sizeof resp);
	resp.events_reported = obj->events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

	return ret ? ret : in_len;
}
3151