#include <asm/page.h>
#include <linux/io.h>
#include <linux/wait.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>

#include "pvrdma.h"

/**
 * pvrdma_req_notify_cq - request notification for a completion queue
 * @ibcq: the completion queue
 * @notify_flags: notification flags
 *
 * @return: 0 for success.
 */
int pvrdma_req_notify_cq(struct ib_cq *ibcq,
			 enum ib_cq_notify_flags notify_flags)
{
	struct pvrdma_dev *dev = to_vdev(ibcq->device);
	struct pvrdma_cq *cq = to_vcq(ibcq);
	u32 val = cq->cq_handle;
	unsigned long flags;
	int has_data = 0;

	val |= (notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		PVRDMA_UAR_CQ_ARM_SOL : PVRDMA_UAR_CQ_ARM;

	spin_lock_irqsave(&cq->cq_lock, flags);

	pvrdma_write_uar_cq(dev, val);

	if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
		unsigned int head;

		has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
						    cq->ibcq.cqe, &head);
		if (unlikely(has_data == PVRDMA_INVALID_IDX))
			dev_err(&dev->pdev->dev, "CQ ring state invalid\n");
	}

	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return has_data;
}

/**
 * pvrdma_create_cq - create completion queue
 * @ibdev: the device
 * @attr: completion queue attributes
 * @context: user context
 * @udata: user data
 *
 * @return: ib_cq completion queue pointer on success,
 *          otherwise returns negative errno.
 */
struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
			       const struct ib_cq_init_attr *attr,
			       struct ib_ucontext *context,
			       struct ib_udata *udata)
{
	int entries = attr->cqe;
	struct pvrdma_dev *dev = to_vdev(ibdev);
	struct pvrdma_cq *cq;
	int ret;
	int npages;
	unsigned long flags;
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_cq *cmd = &req.create_cq;
	struct pvrdma_cmd_create_cq_resp *resp = &rsp.create_cq_resp;
	struct pvrdma_create_cq_resp cq_resp = {0};
	struct pvrdma_create_cq ucmd;

	BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64);

	entries = roundup_pow_of_two(entries);
	if (entries < 1 || entries > dev->dsr->caps.max_cqe)
		return ERR_PTR(-EINVAL);

	if (!atomic_add_unless(&dev->num_cqs, 1, dev->dsr->caps.max_cq))
		return ERR_PTR(-ENOMEM);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq) {
		atomic_dec(&dev->num_cqs);
		return ERR_PTR(-ENOMEM);
	}

	cq->ibcq.cqe = entries;
	cq->is_kernel = !context;

	if (!cq->is_kernel) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			ret = -EFAULT;
			goto err_cq;
		}

		cq->umem = ib_umem_get(context, ucmd.buf_addr, ucmd.buf_size,
				       IB_ACCESS_LOCAL_WRITE, 1);
		if (IS_ERR(cq->umem)) {
			ret = PTR_ERR(cq->umem);
			goto err_cq;
		}

		npages = ib_umem_page_count(cq->umem);
	} else {
		/* One extra page for shared ring state */
		npages = 1 + (entries * sizeof(struct pvrdma_cqe) +
			      PAGE_SIZE - 1) / PAGE_SIZE;

		/* Skip header page. */
		cq->offset = PAGE_SIZE;
	}

	if (npages < 0 || npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
		dev_warn(&dev->pdev->dev,
			 "overflow pages in completion queue\n");
		ret = -EINVAL;
		goto err_umem;
	}

	ret = pvrdma_page_dir_init(dev, &cq->pdir, npages, cq->is_kernel);
	if (ret) {
		dev_warn(&dev->pdev->dev,
			 "could not allocate page directory\n");
		goto err_umem;
	}

	/* Ring state is always the first page. */
	if (cq->is_kernel)
		cq->ring_state = cq->pdir.pages[0];
	else
		pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0);

	refcount_set(&cq->refcnt, 1);
	init_completion(&cq->free);
	spin_lock_init(&cq->cq_lock);

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_CQ;
	cmd->nchunks = npages;
	cmd->ctx_handle = (context) ?
		(u64)to_vucontext(context)->ctx_handle : 0;
	cmd->cqe = entries;
	cmd->pdir_dma = cq->pdir.dir_dma;
	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_CQ_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not create completion queue, error: %d\n", ret);
		goto err_page_dir;
	}

	cq->ibcq.cqe = resp->cqe;
	cq->cq_handle = resp->cq_handle;
	cq_resp.cqn = resp->cq_handle;
	spin_lock_irqsave(&dev->cq_tbl_lock, flags);
	dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq;
	spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

	if (!cq->is_kernel) {
		cq->uar = &(to_vucontext(context)->uar);

		/* Copy udata back. */
		if (ib_copy_to_udata(udata, &cq_resp, sizeof(cq_resp))) {
			dev_warn(&dev->pdev->dev,
				 "failed to copy back udata\n");
			pvrdma_destroy_cq(&cq->ibcq);
			return ERR_PTR(-EINVAL);
		}
	}

	return &cq->ibcq;

err_page_dir:
	pvrdma_page_dir_cleanup(dev, &cq->pdir);
err_umem:
	if (!cq->is_kernel)
		ib_umem_release(cq->umem);
err_cq:
	atomic_dec(&dev->num_cqs);
	kfree(cq);

	return ERR_PTR(ret);
}

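/*
 * Drop the caller's reference and wait until all other users of the CQ are
 * done before releasing the CQ's umem, page directory and memory.
 */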
static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
{
	if (refcount_dec_and_test(&cq->refcnt))
		complete(&cq->free);
	wait_for_completion(&cq->free);

	if (!cq->is_kernel)
		ib_umem_release(cq->umem);

	pvrdma_page_dir_cleanup(dev, &cq->pdir);
	kfree(cq);
}

/**
 * pvrdma_destroy_cq - destroy completion queue
 * @cq: the completion queue to destroy
 *
 * @return: 0 for success.
 */
int pvrdma_destroy_cq(struct ib_cq *cq)
{
	struct pvrdma_cq *vcq = to_vcq(cq);
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_destroy_cq *cmd = &req.destroy_cq;
	struct pvrdma_dev *dev = to_vdev(cq->device);
	unsigned long flags;
	int ret;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_CQ;
	cmd->cq_handle = vcq->cq_handle;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0)
		dev_warn(&dev->pdev->dev,
			 "could not destroy completion queue, error: %d\n",
			 ret);

	/* free cq's resources */
	spin_lock_irqsave(&dev->cq_tbl_lock, flags);
	dev->cq_tbl[vcq->cq_handle] = NULL;
	spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

	pvrdma_free_cq(dev, vcq);
	atomic_dec(&dev->num_cqs);

	return ret;
}

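/* Return a pointer to the i'th CQE in the CQ's page directory. */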
static inline struct pvrdma_cqe *get_cqe(struct pvrdma_cq *cq, int i)
{
	return (struct pvrdma_cqe *)pvrdma_page_dir_get_ptr(
					&cq->pdir,
					cq->offset +
					sizeof(struct pvrdma_cqe) * i);
}

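/*
 * Remove any completions belonging to @qp from the CQ ring, compacting the
 * remaining entries. The CQ lock must be held by the caller.
 */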
void _pvrdma_flush_cqe(struct pvrdma_qp *qp, struct pvrdma_cq *cq)
{
	unsigned int head;
	int has_data;

	if (!cq->is_kernel)
		return;

	/* Lock held */
	has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
					    cq->ibcq.cqe, &head);
	if (unlikely(has_data > 0)) {
		int items;
		int curr;
		int tail = pvrdma_idx(&cq->ring_state->rx.prod_tail,
				      cq->ibcq.cqe);
		struct pvrdma_cqe *cqe;
		struct pvrdma_cqe *curr_cqe;

		items = (tail > head) ? (tail - head) :
			(cq->ibcq.cqe - head + tail);
		curr = --tail;
		while (items-- > 0) {
			if (curr < 0)
				curr = cq->ibcq.cqe - 1;
			if (tail < 0)
				tail = cq->ibcq.cqe - 1;
			curr_cqe = get_cqe(cq, curr);
			if ((curr_cqe->qp & 0xFFFF) != qp->qp_handle) {
				if (curr != tail) {
					cqe = get_cqe(cq, tail);
					*cqe = *curr_cqe;
				}
				tail--;
			} else {
				pvrdma_idx_ring_inc(
					&cq->ring_state->rx.cons_head,
					cq->ibcq.cqe);
			}
			curr--;
		}
	}
}

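/*
 * Poll a single completion from the CQ ring into @wc. Returns 0 on success,
 * or -EAGAIN if no valid entry is available. Caller holds the CQ lock.
 */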
static int pvrdma_poll_one(struct pvrdma_cq *cq, struct pvrdma_qp **cur_qp,
			   struct ib_wc *wc)
{
	struct pvrdma_dev *dev = to_vdev(cq->ibcq.device);
	int has_data;
	unsigned int head;
	bool tried = false;
	struct pvrdma_cqe *cqe;

retry:
	has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
					    cq->ibcq.cqe, &head);
	if (has_data == 0) {
		if (tried)
			return -EAGAIN;

		pvrdma_write_uar_cq(dev, cq->cq_handle | PVRDMA_UAR_CQ_POLL);

		tried = true;
		goto retry;
	} else if (has_data == PVRDMA_INVALID_IDX) {
		dev_err(&dev->pdev->dev, "CQ ring state invalid\n");
		return -EAGAIN;
	}

	cqe = get_cqe(cq, head);

	/* Ensure cqe is valid. */
	rmb();
	if (dev->qp_tbl[cqe->qp & 0xffff])
		*cur_qp = (struct pvrdma_qp *)dev->qp_tbl[cqe->qp & 0xffff];
	else
		return -EAGAIN;

	wc->opcode = pvrdma_wc_opcode_to_ib(cqe->opcode);
	wc->status = pvrdma_wc_status_to_ib(cqe->status);
	wc->wr_id = cqe->wr_id;
	wc->qp = &(*cur_qp)->ibqp;
	wc->byte_len = cqe->byte_len;
	wc->ex.imm_data = cqe->imm_data;
	wc->src_qp = cqe->src_qp;
	wc->wc_flags = pvrdma_wc_flags_to_ib(cqe->wc_flags);
	wc->pkey_index = cqe->pkey_index;
	wc->slid = cqe->slid;
	wc->sl = cqe->sl;
	wc->dlid_path_bits = cqe->dlid_path_bits;
	wc->port_num = cqe->port_num;
	wc->vendor_err = cqe->vendor_err;
	wc->network_hdr_type = cqe->network_hdr_type;

	/* Update shared ring state */
	pvrdma_idx_ring_inc(&cq->ring_state->rx.cons_head, cq->ibcq.cqe);

	return 0;
}

/**
 * pvrdma_poll_cq - poll for work completion queue entries
 * @ibcq: completion queue
 * @num_entries: the maximum number of entries
 * @wc: pointer to work completion array
 *
 * @return: number of polled completion entries
 */
int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct pvrdma_cq *cq = to_vcq(ibcq);
	struct pvrdma_qp *cur_qp = NULL;
	unsigned long flags;
	int npolled;

	if (num_entries < 1 || wc == NULL)
		return 0;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (npolled = 0; npolled < num_entries; ++npolled) {
		if (pvrdma_poll_one(cq, &cur_qp, wc + npolled))
			break;
	}

	spin_unlock_irqrestore(&cq->cq_lock, flags);

	/* Ensure we do not return errors from poll_cq */
	return npolled;
}