// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

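/* Check the SRQ attributes in *attr against the device limits in
 * rxe->attr, silently raising values below the RXE minimums.  @srq is
 * NULL at create time and non-NULL on modify; @mask selects which
 * attributes are checked.  Returns 0 on success or -EINVAL.
 */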
int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
		     struct ib_srq_attr *attr, enum ib_srq_attr_mask mask)
{
	if (srq && srq->error) {
		pr_warn("srq in error state\n");
		goto err1;
	}

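	/* Changing the number of WRs: reject values above the device
	 * limit, zero values, and values below an armed srq->limit; round
	 * small values up to RXE_MIN_SRQ_WR.
	 */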
	if (mask & IB_SRQ_MAX_WR) {
		if (attr->max_wr > rxe->attr.max_srq_wr) {
			pr_warn("max_wr(%d) > max_srq_wr(%d)\n",
				attr->max_wr, rxe->attr.max_srq_wr);
			goto err1;
		}

		if (attr->max_wr <= 0) {
			pr_warn("max_wr(%d) <= 0\n", attr->max_wr);
			goto err1;
		}

		if (srq && srq->limit && (attr->max_wr < srq->limit)) {
			pr_warn("max_wr (%d) < srq->limit (%d)\n",
				attr->max_wr, srq->limit);
			goto err1;
		}

		if (attr->max_wr < RXE_MIN_SRQ_WR)
			attr->max_wr = RXE_MIN_SRQ_WR;
	}

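	/* Arming the limit: it must fit both the device maximum and the
	 * current queue size (buf->index_mask is the queue depth - 1).
	 */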
	if (mask & IB_SRQ_LIMIT) {
		if (attr->srq_limit > rxe->attr.max_srq_wr) {
			pr_warn("srq_limit(%d) > max_srq_wr(%d)\n",
				attr->srq_limit, rxe->attr.max_srq_wr);
			goto err1;
		}

		if (srq && (attr->srq_limit > srq->rq.queue->buf->index_mask)) {
			pr_warn("srq_limit (%d) > cur limit(%d)\n",
				attr->srq_limit,
				srq->rq.queue->buf->index_mask);
			goto err1;
		}
	}

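	/* Only at create time: validate the scatter/gather depth and round
	 * it up to RXE_MIN_SRQ_SGE.
	 */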
	if (mask == IB_SRQ_INIT_MASK) {
		if (attr->max_sge > rxe->attr.max_srq_sge) {
			pr_warn("max_sge(%d) > max_srq_sge(%d)\n",
				attr->max_sge, rxe->attr.max_srq_sge);
			goto err1;
		}

		if (attr->max_sge < RXE_MIN_SRQ_SGE)
			attr->max_sge = RXE_MIN_SRQ_SGE;
	}

	return 0;

err1:
	return -EINVAL;
}

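/* Fill in a freshly allocated SRQ from already-checked init attributes:
 * set up the receive queue, share it with user space through mmap when a
 * user response buffer is present, and return the SRQ number to user
 * space.
 */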
int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
		      struct ib_srq_init_attr *init,
		      struct ib_ucontext *context,
		      struct rxe_create_srq_resp __user *uresp)
{
	int err;
	int srq_wqe_size;
	struct rxe_queue *q;

	srq->ibsrq.event_handler = init->event_handler;
	srq->ibsrq.srq_context = init->srq_context;
	srq->limit = init->attr.srq_limit;
	srq->srq_num = srq->pelem.index;
	srq->rq.max_wr = init->attr.max_wr;
	srq->rq.max_sge = init->attr.max_sge;

	srq_wqe_size = rcv_wqe_size(srq->rq.max_sge);

	spin_lock_init(&srq->rq.producer_lock);
	spin_lock_init(&srq->rq.consumer_lock);

	q = rxe_queue_init(rxe, &srq->rq.max_wr, srq_wqe_size);
	if (!q) {
		pr_warn("unable to allocate queue for srq\n");
		return -ENOMEM;
	}

	srq->rq.queue = q;

	err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, context, q->buf,
			   q->buf_size, &q->ip);
	if (err) {
		vfree(q->buf);
		kfree(q);
		/* don't leave a dangling pointer to the freed queue */
		srq->rq.queue = NULL;
		return err;
	}

	if (uresp) {
		if (copy_to_user(&uresp->srq_num, &srq->srq_num,
				 sizeof(uresp->srq_num))) {
			rxe_queue_cleanup(q);
			srq->rq.queue = NULL;
			return -EFAULT;
		}
	}

	return 0;
}

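/* Apply already-checked modify attributes to an existing SRQ: resize the
 * receive queue when IB_SRQ_MAX_WR is set and/or (re)arm the limit when
 * IB_SRQ_LIMIT is set.
 */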
int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
		      struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
		      struct rxe_modify_srq_cmd *ucmd)
{
	int err;
	struct rxe_queue *q = srq->rq.queue;
	struct mminfo __user *mi = NULL;

	if (mask & IB_SRQ_MAX_WR) {
		/* The mmap info for the resized queue is written back
		 * through a user pointer carried in the command itself
		 * rather than through the usual response buffer.
		 */
		mi = u64_to_user_ptr(ucmd->mmap_info_addr);

		err = rxe_queue_resize(q, &attr->max_wr,
				       rcv_wqe_size(srq->rq.max_sge),
				       srq->rq.queue->ip ?
						srq->rq.queue->ip->context :
						NULL,
				       mi, &srq->rq.producer_lock,
				       &srq->rq.consumer_lock);
		if (err)
			goto err2;
	}

	if (mask & IB_SRQ_LIMIT)
		srq->limit = attr->srq_limit;

	return 0;

err2:
	rxe_queue_cleanup(q);
	srq->rq.queue = NULL;
	return err;
}
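
/* Usage sketch (illustrative only, not part of this file): from user
 * space these paths are reached through the standard libibverbs API.
 * A minimal example, assuming a valid struct ibv_pd *pd obtained from
 * ibv_alloc_pd() on an rxe device:
 *
 *	struct ibv_srq_init_attr init_attr = {
 *		.attr = {
 *			.max_wr    = 64,  // checked against max_srq_wr
 *			.max_sge   = 4,   // checked against max_srq_sge
 *			.srq_limit = 0,
 *		},
 *	};
 *	struct ibv_srq *srq = ibv_create_srq(pd, &init_attr);
 *
 *	// Arm the limit event; this ends up in rxe_srq_chk_attr() and
 *	// rxe_srq_from_attr() with IB_SRQ_LIMIT set:
 *	struct ibv_srq_attr attr = { .srq_limit = 16 };
 *	ibv_modify_srq(srq, &attr, IBV_SRQ_LIMIT);
 */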