#include <linux/err.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"

/**
 * ipath_cq_enter - add a new entry to the completion queue
 * @cq: the completion queue
 * @entry: the work completion entry to add
 * @solicited: true if @entry is a solicited entry
 *
 * This may be called with qp->s_lock held.
 */
void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
{
	struct ipath_cq_wc *wc;
	unsigned long flags;
	u32 head;
	u32 next;

	spin_lock_irqsave(&cq->lock, flags);

	/*
	 * Note that the head pointer might be writable by user processes.
	 * Take care to verify it is a sane value.
	 */
	wc = cq->queue;
	head = wc->head;
	if (head >= (unsigned) cq->ibcq.cqe) {
		head = cq->ibcq.cqe;
		next = 0;
	} else
		next = head + 1;
	if (unlikely(next == wc->tail)) {
		spin_unlock_irqrestore(&cq->lock, flags);
		if (cq->ibcq.event_handler) {
			struct ib_event ev;

			ev.device = cq->ibcq.device;
			ev.element.cq = &cq->ibcq;
			ev.event = IB_EVENT_CQ_ERR;
			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
		}
		return;
	}
	if (cq->ip) {
		wc->uqueue[head].wr_id = entry->wr_id;
		wc->uqueue[head].status = entry->status;
		wc->uqueue[head].opcode = entry->opcode;
		wc->uqueue[head].vendor_err = entry->vendor_err;
		wc->uqueue[head].byte_len = entry->byte_len;
		wc->uqueue[head].ex.imm_data = (__u32 __force) entry->ex.imm_data;
		wc->uqueue[head].qp_num = entry->qp->qp_num;
		wc->uqueue[head].src_qp = entry->src_qp;
		wc->uqueue[head].wc_flags = entry->wc_flags;
		wc->uqueue[head].pkey_index = entry->pkey_index;
		wc->uqueue[head].slid = entry->slid;
		wc->uqueue[head].sl = entry->sl;
		wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
		wc->uqueue[head].port_num = entry->port_num;
		/* Make sure entry is written before the head index. */
		smp_wmb();
	} else
		wc->kqueue[head] = *entry;
	wc->head = next;

	if (cq->notify == IB_CQ_NEXT_COMP ||
	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
		cq->notify = IB_CQ_NONE;
		cq->triggered++;
		/*
		 * This will cause send_complete() to be called in
		 * another thread.
		 */
		tasklet_hi_schedule(&cq->comptask);
	}

	spin_unlock_irqrestore(&cq->lock, flags);

	if (entry->status != IB_WC_SUCCESS)
		to_idev(cq->ibcq.device)->n_wqe_errs++;
}
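
/*
 * Illustrative sketch, not part of the driver: the queue above is a ring
 * of cq->ibcq.cqe + 1 slots, so "next == tail" means full and
 * "head == tail" means empty.  For example, with cqe = 3 (4 slots):
 *
 *	head = 2, tail = 0  ->  next = 3 != tail: entry stored at slot 2
 *	head = 3, tail = 0  ->  next = 0 == tail: full, IB_EVENT_CQ_ERR
 *
 * The extra slot is what lets head == tail unambiguously mean "empty".
 */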

/**
 * ipath_poll_cq - poll for work completion entries
 * @ibcq: the completion queue to poll
 * @num_entries: the maximum number of entries to return
 * @entry: pointer to an array where the work completions are placed
 *
 * Returns the number of completion entries polled.
 *
 * This may be called from interrupt context.  Also called by ib_poll_cq()
 * in the generic verbs code.
 */
int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
	struct ipath_cq *cq = to_icq(ibcq);
	struct ipath_cq_wc *wc;
	unsigned long flags;
	int npolled;
	u32 tail;

	/* The kernel can only poll a kernel completion queue */
	if (cq->ip) {
		npolled = -EINVAL;
		goto bail;
	}

	spin_lock_irqsave(&cq->lock, flags);

	wc = cq->queue;
	tail = wc->tail;
	if (tail > (u32) cq->ibcq.cqe)
		tail = (u32) cq->ibcq.cqe;
	for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
		if (tail == wc->head)
			break;
		/* The kernel doesn't need a memory barrier since it holds the lock. */
		*entry = wc->kqueue[tail];
		if (tail >= cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	wc->tail = tail;

	spin_unlock_irqrestore(&cq->lock, flags);

bail:
	return npolled;
}
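
/*
 * Illustrative sketch, not part of the driver: a kernel consumer drains
 * the CQ through the generic verbs wrapper, which dispatches to
 * ipath_poll_cq() above.  "process_wc" is a hypothetical handler:
 *
 *	struct ib_wc wc;
 *
 *	while (ib_poll_cq(ibcq, 1, &wc) > 0)
 *		process_wc(&wc);
 *
 * User-mapped CQs (cq->ip != NULL) return -EINVAL here because user
 * space polls the mmap()ed queue directly.
 */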

static void send_complete(unsigned long data)
{
	struct ipath_cq *cq = (struct ipath_cq *)data;

	/*
	 * The completion handler will most likely rearm the notification
	 * and poll for all pending entries.  If a new completion entry
	 * is added while we are in this routine, tasklet_hi_schedule()
	 * won't call us again until we return so we check triggered to
	 * see if we need to call the handler again.
	 */
	for (;;) {
		u8 triggered = cq->triggered;

		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);

		if (cq->triggered == triggered)
			return;
	}
}

/**
 * ipath_create_cq - create a completion queue
 * @ibdev: the device this completion queue is attached to
 * @entries: the minimum size of the completion queue
 * @comp_vector: unused by the InfiniPath driver
 * @context: the process context for mmap()ing the queue into user space
 * @udata: used to return the offset of the completion queue for mmap()
 *
 * Returns a pointer to the completion queue or negative errno values
 * for failure.
 *
 * Called by ib_create_cq() in the generic verbs code.
 */
struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, int comp_vector,
			      struct ib_ucontext *context,
			      struct ib_udata *udata)
{
	struct ipath_ibdev *dev = to_idev(ibdev);
	struct ipath_cq *cq;
	struct ipath_cq_wc *wc;
	struct ib_cq *ret;
	u32 sz;

	if (entries < 1 || entries > ib_ipath_max_cqes) {
		ret = ERR_PTR(-EINVAL);
		goto done;
	}

	/* Allocate the completion queue structure. */
	cq = kmalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq) {
		ret = ERR_PTR(-ENOMEM);
		goto done;
	}

	/*
	 * Allocate the completion queue entries and head/tail information.
	 * This is allocated separately so that it can be resized and
	 * also mapped into user space.
	 * We need to use vmalloc() in order to support mmap and large
	 * numbers of entries.
	 */
	sz = sizeof(*wc);
	if (udata && udata->outlen >= sizeof(__u64))
		sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
	else
		sz += sizeof(struct ib_wc) * (entries + 1);
	wc = vmalloc_user(sz);
	if (!wc) {
		ret = ERR_PTR(-ENOMEM);
		goto bail_cq;
	}

	/*
	 * Return the address of the WC as the offset to mmap.
	 * See ipath_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		int err;

		cq->ip = ipath_create_mmap_info(dev, sz, context, wc);
		if (!cq->ip) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_wc;
		}

		err = ib_copy_to_udata(udata, &cq->ip->offset,
				       sizeof(cq->ip->offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_ip;
		}
	} else
		cq->ip = NULL;

	spin_lock(&dev->n_cqs_lock);
	if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
		spin_unlock(&dev->n_cqs_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_cqs_allocated++;
	spin_unlock(&dev->n_cqs_lock);

	if (cq->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	/*
	 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
	 * The number of entries should be >= the number requested or return
	 * an error.
	 */
	cq->ibcq.cqe = entries;
	cq->notify = IB_CQ_NONE;
	cq->triggered = 0;
	spin_lock_init(&cq->lock);
	tasklet_init(&cq->comptask, send_complete, (unsigned long)cq);
	wc->head = 0;
	wc->tail = 0;
	cq->queue = wc;

	ret = &cq->ibcq;

	goto done;

bail_ip:
	kfree(cq->ip);
bail_wc:
	vfree(wc);
bail_cq:
	kfree(cq);
done:
	return ret;
}
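
/*
 * Illustrative sketch, not part of the driver: a kernel consumer reaches
 * this function through the generic verbs API.  "my_comp_handler" and
 * "my_context" are hypothetical:
 *
 *	struct ib_cq *cq;
 *
 *	cq = ib_create_cq(ibdev, my_comp_handler, NULL, my_context, 128, 0);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *
 * ib_create_cq() initializes the ib_cq fields other than cqe, which is
 * why only cq->ibcq.cqe is set above.
 */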

/**
 * ipath_destroy_cq - destroy a completion queue
 * @ibcq: the completion queue to destroy
 *
 * Returns 0 for success.
 *
 * Called by ib_destroy_cq() in the generic verbs code.
 */
int ipath_destroy_cq(struct ib_cq *ibcq)
{
	struct ipath_ibdev *dev = to_idev(ibcq->device);
	struct ipath_cq *cq = to_icq(ibcq);

	tasklet_kill(&cq->comptask);
	spin_lock(&dev->n_cqs_lock);
	dev->n_cqs_allocated--;
	spin_unlock(&dev->n_cqs_lock);
	if (cq->ip)
		kref_put(&cq->ip->ref, ipath_release_mmap_info);
	else
		vfree(cq->queue);
	kfree(cq);

	return 0;
}

/**
 * ipath_req_notify_cq - change the notification type for a completion queue
 * @ibcq: the completion queue
 * @notify_flags: the type of notification to request
 *
 * Returns 1 if the CQ is not empty and IB_CQ_REPORT_MISSED_EVENTS was
 * requested, 0 otherwise.
 *
 * This may be called from interrupt context.  Also called by
 * ib_req_notify_cq() in the generic verbs code.
 */
int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
{
	struct ipath_cq *cq = to_icq(ibcq);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&cq->lock, flags);

	/*
	 * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow any
	 * other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
	 */
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;

	if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
	    cq->queue->head != cq->queue->tail)
		ret = 1;

	spin_unlock_irqrestore(&cq->lock, flags);

	return ret;
}
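
/*
 * Illustrative sketch, not part of the driver: the race-free rearm
 * pattern a kernel consumer would use with this function.  "process_wc"
 * is a hypothetical handler:
 *
 *	do {
 *		while (ib_poll_cq(ibcq, 1, &wc) > 0)
 *			process_wc(&wc);
 *	} while (ib_req_notify_cq(ibcq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 *
 * The "ret = 1" path above is what lets IB_CQ_REPORT_MISSED_EVENTS catch
 * entries that arrived between the last poll and the rearm.
 */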

/**
 * ipath_resize_cq - change the size of the CQ
 * @ibcq: the completion queue to resize
 * @cqe: the new size of the CQ
 * @udata: used to return the new offset of the CQ for mmap()
 *
 * Returns 0 for success.
 */
int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	struct ipath_cq *cq = to_icq(ibcq);
	struct ipath_cq_wc *old_wc;
	struct ipath_cq_wc *wc;
	u32 head, tail, n;
	int ret;
	u32 sz;

	if (cqe < 1 || cqe > ib_ipath_max_cqes) {
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 */
	sz = sizeof(*wc);
	if (udata && udata->outlen >= sizeof(__u64))
		sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
	else
		sz += sizeof(struct ib_wc) * (cqe + 1);
	wc = vmalloc_user(sz);
	if (!wc) {
		ret = -ENOMEM;
		goto bail;
	}

	/* Check that we can write the offset to mmap. */
	if (udata && udata->outlen >= sizeof(__u64)) {
		__u64 offset = 0;

		ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
		if (ret)
			goto bail_free;
	}

	spin_lock_irq(&cq->lock);
	/*
	 * Make sure head and tail are sane since they
	 * might be user writable.
	 */
	old_wc = cq->queue;
	head = old_wc->head;
	if (head > (u32) cq->ibcq.cqe)
		head = (u32) cq->ibcq.cqe;
	tail = old_wc->tail;
	if (tail > (u32) cq->ibcq.cqe)
		tail = (u32) cq->ibcq.cqe;
	if (head < tail)
		n = cq->ibcq.cqe + 1 + head - tail;
	else
		n = head - tail;
	if (unlikely((u32)cqe < n)) {
		ret = -EINVAL;
		goto bail_unlock;
	}
	for (n = 0; tail != head; n++) {
		if (cq->ip)
			wc->uqueue[n] = old_wc->uqueue[tail];
		else
			wc->kqueue[n] = old_wc->kqueue[tail];
		if (tail == (u32) cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	cq->ibcq.cqe = cqe;
	wc->head = n;
	wc->tail = 0;
	cq->queue = wc;
	spin_unlock_irq(&cq->lock);

	vfree(old_wc);

	if (cq->ip) {
		struct ipath_ibdev *dev = to_idev(ibcq->device);
		struct ipath_mmap_info *ip = cq->ip;

		ipath_update_mmap_info(dev, ip, sz, wc);

		/*
		 * Return the offset to mmap.
		 * See ipath_mmap() for details.
		 */
		if (udata && udata->outlen >= sizeof(__u64)) {
			ret = ib_copy_to_udata(udata, &ip->offset,
					       sizeof(ip->offset));
			if (ret)
				goto bail;
		}

		spin_lock_irq(&dev->pending_lock);
		if (list_empty(&ip->pending_mmaps))
			list_add(&ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = 0;
	goto bail;

bail_unlock:
	spin_unlock_irq(&cq->lock);
bail_free:
	vfree(wc);
bail:
	return ret;
}
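
/*
 * Illustrative sketch, not part of the driver: a consumer grows a CQ via
 * the generic verbs wrapper, which calls ipath_resize_cq() above.
 * "new_cqe" is a hypothetical value:
 *
 *	ret = ib_resize_cq(ibcq, new_cqe);
 *
 * The resize fails with -EINVAL if new_cqe is smaller than the number of
 * entries currently queued (the "n" computed under cq->lock above).
 */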