#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sched.h>

#include <asm/io.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"

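/*
 * SRQ rings no larger than this are backed by a single contiguous
 * buffer; larger rings fall back to a list of pages (see
 * mthca_buf_alloc() and get_wqe() below).
 */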
enum {
	MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE
};

struct mthca_tavor_srq_context {
	__be64 wqe_base_ds;
	__be32 state_pd;
	__be32 lkey;
	__be32 uar;
	__be16 limit_watermark;
	__be16 wqe_cnt;
	u32    reserved[2];
};

struct mthca_arbel_srq_context {
	__be32 state_logsize_srqn;
	__be32 lkey;
	__be32 db_index;
	__be32 logstride_usrpage;
	__be64 wqe_base;
	__be32 eq_pd;
	__be16 limit_watermark;
	__be16 wqe_cnt;
	u16    reserved1;
	__be16 wqe_counter;
	u32    reserved2[3];
};

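/*
 * Return a pointer to WQE number n.  For a direct allocation the ring
 * is one contiguous buffer; otherwise find the page holding the WQE
 * and then the offset within that page.
 */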
static void *get_wqe(struct mthca_srq *srq, int n)
{
	if (srq->is_direct)
		return srq->queue.direct.buf + (n << srq->wqe_shift);
	else
		return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
			((n << srq->wqe_shift) & (PAGE_SIZE - 1));
}

/*
 * Return a pointer to the location within a WQE that we're using as a
 * link when the WQE is in the free list.  We use the imm field
 * because in the Tavor case, posting a WQE may overwrite the next
 * segment of the previous WQE, but a receive WQE will never touch the
 * imm field.  This avoids corrupting our free list if the previous
 * WQE has already completed and been put on the free list when we
 * post the next WQE.
 */
static inline int *wqe_to_link(void *wqe)
{
	return (int *) (wqe + offsetof(struct mthca_next_seg, imm));
}

static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_tavor_srq_context *context)
{
	memset(context, 0, sizeof *context);

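	/*
	 * The low bits of wqe_base_ds hold the WQE size in 16-byte
	 * chunks, hence the shift by wqe_shift - 4.
	 */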
	context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));
	context->state_pd    = cpu_to_be32(pd->pd_num);
	context->lkey        = cpu_to_be32(srq->mr.ibmr.lkey);

	if (pd->ibpd.uobject)
		context->uar =
			cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
	else
		context->uar = cpu_to_be32(dev->driver_uar.index);
}

static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_arbel_srq_context *context)
{
	int logsize, max;

	memset(context, 0, sizeof *context);

	/*
	 * Put max in a temporary variable to work around gcc bug
	 * triggered by ilog2() on sparc64.
	 */
	max = srq->max;
	logsize = ilog2(max);
	context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn);
	context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
	context->db_index = cpu_to_be32(srq->db_index);
	context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29);
	if (pd->ibpd.uobject)
		context->logstride_usrpage |=
			cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
	else
		context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index);
	context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num);
}

static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq)
{
	mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue,
		       srq->is_direct, &srq->mr);
	kfree(srq->wrid);
}

static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
			       struct mthca_srq *srq)
{
	struct mthca_data_seg *scatter;
	void *wqe;
	int err;
	int i;

	if (pd->ibpd.uobject)
		return 0;

	srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL);
	if (!srq->wrid)
		return -ENOMEM;

	err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift,
			      MTHCA_MAX_DIRECT_SRQ_SIZE,
			      &srq->queue, &srq->is_direct, pd, 1, &srq->mr);
	if (err) {
		kfree(srq->wrid);
		return err;
	}

	/*
	 * Now initialize the SRQ buffer so that all of the WQEs are
	 * linked into the list of free WQEs.  In addition, set the
	 * scatter list L_Keys to the sentry value of 0x100.
	 */
	for (i = 0; i < srq->max; ++i) {
		wqe = get_wqe(srq, i);

		*wqe_to_link(wqe) = i < srq->max - 1 ? i + 1 : -1;

		for (scatter = wqe + sizeof (struct mthca_next_seg);
		     (void *) scatter < wqe + (1 << srq->wqe_shift);
		     ++scatter)
			scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
	}

	srq->last = get_wqe(srq, srq->max - 1);

	return 0;
}

int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
		    struct ib_srq_attr *attr, struct mthca_srq *srq)
{
	struct mthca_mailbox *mailbox;
	u8 status;
	int ds;
	int err;

	/* Sanity check SRQ size before proceeding */
	if (attr->max_wr  > dev->limits.max_srq_wqes ||
	    attr->max_sge > dev->limits.max_srq_sge)
		return -EINVAL;

	srq->max      = attr->max_wr;
	srq->max_gs   = attr->max_sge;
	srq->counter  = 0;

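	/*
	 * Allocate one extra WQE on top of the requested max_wr; on
	 * mem-free HCAs also round the ring up to a power of 2, since
	 * the context stores the size as log2.  Only srq->max - 1
	 * entries are reported back to the consumer as usable.
	 */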
	if (mthca_is_memfree(dev))
		srq->max = roundup_pow_of_two(srq->max + 1);
	else
		srq->max = srq->max + 1;

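	/*
	 * Each WQE holds a next segment plus a scatter list of max_gs
	 * entries; round the descriptor size up to a power of 2, with
	 * a 64-byte minimum.
	 */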
	ds = max(64UL,
		 roundup_pow_of_two(sizeof (struct mthca_next_seg) +
				    srq->max_gs * sizeof (struct mthca_data_seg)));

	if (!mthca_is_memfree(dev) && (ds > dev->limits.max_desc_sz))
		return -EINVAL;

	srq->wqe_shift = ilog2(ds);

	srq->srqn = mthca_alloc(&dev->srq_table.alloc);
	if (srq->srqn == -1)
		return -ENOMEM;

	if (mthca_is_memfree(dev)) {
		err = mthca_table_get(dev, dev->srq_table.table, srq->srqn);
		if (err)
			goto err_out;

		if (!pd->ibpd.uobject) {
			srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
						       srq->srqn, &srq->db);
			if (srq->db_index < 0) {
				err = -ENOMEM;
				goto err_out_icm;
			}
		}
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_out_db;
	}

	err = mthca_alloc_srq_buf(dev, pd, srq);
	if (err)
		goto err_out_mailbox;

	spin_lock_init(&srq->lock);
	srq->refcount = 1;
	init_waitqueue_head(&srq->wait);
	mutex_init(&srq->mutex);

	if (mthca_is_memfree(dev))
		mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
	else
		mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);

	err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status);

	if (err) {
		mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err);
		goto err_out_free_buf;
	}
	if (status) {
		mthca_warn(dev, "SW2HW_SRQ returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_free_buf;
	}

	spin_lock_irq(&dev->srq_table.lock);
	if (mthca_array_set(&dev->srq_table.srq,
			    srq->srqn & (dev->limits.num_srqs - 1),
			    srq)) {
		spin_unlock_irq(&dev->srq_table.lock);
		goto err_out_free_srq;
	}
	spin_unlock_irq(&dev->srq_table.lock);

	mthca_free_mailbox(dev, mailbox);

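	/* Initially every WQE is on the free list: head 0, tail max - 1. */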
	srq->first_free = 0;
	srq->last_free  = srq->max - 1;

	attr->max_wr    = srq->max - 1;
	attr->max_sge   = srq->max_gs;

	return 0;

err_out_free_srq:
	err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
	else if (status)
		mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);

err_out_free_buf:
	if (!pd->ibpd.uobject)
		mthca_free_srq_buf(dev, srq);

err_out_mailbox:
	mthca_free_mailbox(dev, mailbox);

err_out_db:
	if (!pd->ibpd.uobject && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);

err_out_icm:
	mthca_table_put(dev, dev->srq_table.table, srq->srqn);

err_out:
	mthca_free(&dev->srq_table.alloc, srq->srqn);

	return err;
}

static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq)
{
	int c;

	spin_lock_irq(&dev->srq_table.lock);
	c = srq->refcount;
	spin_unlock_irq(&dev->srq_table.lock);

	return c;
}

void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
{
	struct mthca_mailbox *mailbox;
	int err;
	u8 status;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		mthca_warn(dev, "No memory for mailbox to free SRQ.\n");
		return;
	}

	err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
	else if (status)
		mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);

	spin_lock_irq(&dev->srq_table.lock);
	mthca_array_clear(&dev->srq_table.srq,
			  srq->srqn & (dev->limits.num_srqs - 1));
	--srq->refcount;
	spin_unlock_irq(&dev->srq_table.lock);

	wait_event(srq->wait, !get_srq_refcount(dev, srq));

	if (!srq->ibsrq.uobject) {
		mthca_free_srq_buf(dev, srq);
		if (mthca_is_memfree(dev))
			mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
	}

	mthca_table_put(dev, dev->srq_table.table, srq->srqn);
	mthca_free(&dev->srq_table.alloc, srq->srqn);
	mthca_free_mailbox(dev, mailbox);
}

int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		     enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	int ret;
	u8 status;

	/* We don't support resizing SRQs (yet?) */
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		u32 max_wr = mthca_is_memfree(dev) ? srq->max - 1 : srq->max;
		if (attr->srq_limit > max_wr)
			return -EINVAL;

		mutex_lock(&srq->mutex);
		ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit, &status);
		mutex_unlock(&srq->mutex);

		if (ret)
			return ret;
		if (status)
			return -EINVAL;
	}

	return 0;
}

int mthca_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	struct mthca_mailbox *mailbox;
	struct mthca_arbel_srq_context *arbel_ctx;
	struct mthca_tavor_srq_context *tavor_ctx;
	u8 status;
	int err;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox, &status);
	if (err)
		goto out;

	if (mthca_is_memfree(dev)) {
		arbel_ctx = mailbox->buf;
		srq_attr->srq_limit = be16_to_cpu(arbel_ctx->limit_watermark);
	} else {
		tavor_ctx = mailbox->buf;
		srq_attr->srq_limit = be16_to_cpu(tavor_ctx->limit_watermark);
	}

	srq_attr->max_wr  = srq->max - 1;
	srq_attr->max_sge = srq->max_gs;

out:
	mthca_free_mailbox(dev, mailbox);

	return err;
}

void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
		     enum ib_event_type event_type)
{
	struct mthca_srq *srq;
	struct ib_event event;

	spin_lock(&dev->srq_table.lock);
	srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
	if (srq)
		++srq->refcount;
	spin_unlock(&dev->srq_table.lock);

	if (!srq) {
		mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	if (!srq->ibsrq.event_handler)
		goto out;

	event.device      = &dev->ib_dev;
	event.event       = event_type;
	event.element.srq = &srq->ibsrq;
	srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);

out:
	spin_lock(&dev->srq_table.lock);
	if (!--srq->refcount)
		wake_up(&srq->wait);
	spin_unlock(&dev->srq_table.lock);
}

/*
 * This function must be called with IRQs disabled.
 */
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
{
	int ind;

	ind = wqe_addr >> srq->wqe_shift;

	spin_lock(&srq->lock);

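	/*
	 * Append the WQE to the free list: link the old tail to it if
	 * the list is non-empty, otherwise it becomes the new head.
	 */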
	if (likely(srq->first_free >= 0))
		*wqe_to_link(get_wqe(srq, srq->last_free)) = ind;
	else
		srq->first_free = ind;

	*wqe_to_link(get_wqe(srq, ind)) = -1;
	srq->last_free = ind;

	spin_unlock(&srq->lock);
}

int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			      struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	unsigned long flags;
	int err = 0;
	int first_ind;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&srq->lock, flags);

	first_ind = srq->first_free;

	for (nreq = 0; wr; wr = wr->next) {
		ind = srq->first_free;

		if (unlikely(ind < 0)) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		wqe      = get_wqe(srq, ind);
		next_ind = *wqe_to_link(wqe);

		if (unlikely(next_ind < 0)) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

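		/*
		 * Remember the previous WQE: the new WQE gets linked
		 * into its next segment (with the DBD bit set) below.
		 */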
		prev_wqe  = srq->last;
		srq->last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			srq->last = prev_wqe;
			break;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < srq->max_gs)
			mthca_set_data_seg_inval(wqe);

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32((ind << srq->wqe_shift) | 1);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);

		srq->wrid[ind]  = wr->wr_id;
		srq->first_free = next_ind;

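		/*
		 * A single Tavor receive doorbell can only account
		 * for a limited batch of WQEs, so ring it and start a
		 * fresh batch whenever that limit is reached.
		 */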
		++nreq;
		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
			nreq = 0;

			/*
			 * Make sure that descriptors are written
			 * before doorbell is rung.
			 */
			wmb();

			mthca_write64(first_ind << srq->wqe_shift, srq->srqn << 8,
				      dev->kar + MTHCA_RECEIVE_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

			first_ind = srq->first_free;
		}
	}

	if (likely(nreq)) {
		/*
		 * Make sure that descriptors are written before
		 * doorbell is rung.
		 */
		wmb();

		mthca_write64(first_ind << srq->wqe_shift, (srq->srqn << 8) | nreq,
			      dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	/*
	 * Make sure doorbells don't leak out of SRQ spinlock and
	 * reach the HCA out of order:
	 */
	mmiowb();

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}

int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			      struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	unsigned long flags;
	int err = 0;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;

	spin_lock_irqsave(&srq->lock, flags);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		ind = srq->first_free;

		if (unlikely(ind < 0)) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		wqe      = get_wqe(srq, ind);
		next_ind = *wqe_to_link(wqe);

		if (unlikely(next_ind < 0)) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

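		/*
		 * Mem-free HCAs just link each WQE to the next free
		 * entry up front; no DBD chaining through the previous
		 * WQE is needed as in the Tavor path.
		 */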
		((struct mthca_next_seg *) wqe)->nda_op =
			cpu_to_be32((next_ind << srq->wqe_shift) | 1);
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < srq->max_gs)
			mthca_set_data_seg_inval(wqe);

		srq->wrid[ind]  = wr->wr_id;
		srq->first_free = next_ind;
	}

	if (likely(nreq)) {
		srq->counter += nreq;

		/*
		 * Make sure that descriptors are written before
		 * we write doorbell record.
		 */
		wmb();
		*srq->db = cpu_to_be32(srq->counter);
	}

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}

int mthca_max_srq_sge(struct mthca_dev *dev)
{
	if (mthca_is_memfree(dev))
		return dev->limits.max_sg;

	/*
	 * SRQ allocations are based on powers of 2 for Tavor,
	 * (although they only need to be multiples of 16 bytes).
	 *
	 * Therefore, we need to base the max number of sg entries on
	 * the largest power of 2 descriptor size that is <= to the
	 * actual max WQE descriptor size, rather than return the
	 * max_sg value given by the firmware (which is based on WQE
	 * sizes as multiples of 16, not powers of 2).
	 *
	 * If SRQ implementation is changed for Tavor to be based on
	 * multiples of 16, the calculation below can be deleted and
	 * the FW max_sg value returned.
	 */
	return min_t(int, dev->limits.max_sg,
		     ((1 << (fls(dev->limits.max_desc_sz) - 1)) -
		      sizeof (struct mthca_next_seg)) /
		     sizeof (struct mthca_data_seg));
}

int mthca_init_srq_table(struct mthca_dev *dev)
{
	int err;

	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return 0;

	spin_lock_init(&dev->srq_table.lock);

	err = mthca_alloc_init(&dev->srq_table.alloc,
			       dev->limits.num_srqs,
			       dev->limits.num_srqs - 1,
			       dev->limits.reserved_srqs);
	if (err)
		return err;

	err = mthca_array_init(&dev->srq_table.srq,
			       dev->limits.num_srqs);
	if (err)
		mthca_alloc_cleanup(&dev->srq_table.alloc);

	return err;
}

void mthca_cleanup_srq_table(struct mthca_dev *dev)
{
	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return;

	mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs);
	mthca_alloc_cleanup(&dev->srq_table.alloc);
}