#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
			  int has_srq)
{
	if (cap->max_send_wr > rxe->attr.max_qp_wr) {
		pr_warn("invalid send wr = %d > %d\n",
			cap->max_send_wr, rxe->attr.max_qp_wr);
		goto err1;
	}

	if (cap->max_send_sge > rxe->attr.max_send_sge) {
		pr_warn("invalid send sge = %d > %d\n",
			cap->max_send_sge, rxe->attr.max_send_sge);
		goto err1;
	}

	if (!has_srq) {
		if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
			pr_warn("invalid recv wr = %d > %d\n",
				cap->max_recv_wr, rxe->attr.max_qp_wr);
			goto err1;
		}

		if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
			pr_warn("invalid recv sge = %d > %d\n",
				cap->max_recv_sge, rxe->attr.max_recv_sge);
			goto err1;
		}
	}

	if (cap->max_inline_data > rxe->max_inline_data) {
		pr_warn("invalid max inline data = %d > %d\n",
			cap->max_inline_data, rxe->max_inline_data);
		goto err1;
	}

	return 0;

err1:
	return -EINVAL;
}

int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
	struct ib_qp_cap *cap = &init->cap;
	struct rxe_port *port;
	int port_num = init->port_num;

	if (!init->recv_cq || !init->send_cq) {
		pr_warn("missing cq\n");
		goto err1;
	}

	if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
		goto err1;

	if (init->qp_type == IB_QPT_SMI || init->qp_type == IB_QPT_GSI) {
		if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
			pr_warn("invalid port = %d\n", port_num);
			goto err1;
		}

		port = &rxe->port;

		if (init->qp_type == IB_QPT_SMI && port->qp_smi_index) {
			pr_warn("SMI QP exists for port %d\n", port_num);
			goto err1;
		}

		if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
			pr_warn("GSI QP exists for port %d\n", port_num);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
	qp->resp.res_head = 0;
	qp->resp.res_tail = 0;
	qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

	if (!qp->resp.resources)
		return -ENOMEM;

	return 0;
}

static void free_rd_atomic_resources(struct rxe_qp *qp)
{
	if (qp->resp.resources) {
		int i;

		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			struct resp_res *res = &qp->resp.resources[i];

			free_rd_atomic_resource(qp, res);
		}
		kfree(qp->resp.resources);
		qp->resp.resources = NULL;
	}
}

void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
{
	if (res->type == RXE_ATOMIC_MASK) {
		rxe_drop_ref(qp);
		kfree_skb(res->atomic.skb);
	} else if (res->type == RXE_READ_MASK) {
		if (res->read.mr)
			rxe_drop_ref(res->read.mr);
	}
	res->type = 0;
}

static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
	int i;
	struct resp_res *res;

	if (qp->resp.resources) {
		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			res = &qp->resp.resources[i];
			free_rd_atomic_resource(qp, res);
		}
	}
}

static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
			     struct ib_qp_init_attr *init)
{
	struct rxe_port *port;
	u32 qpn;

	qp->sq_sig_type = init->sq_sig_type;
	qp->attr.path_mtu = 1;
	qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu);

	qpn = qp->pelem.index;
	port = &rxe->port;

	switch (init->qp_type) {
	case IB_QPT_SMI:
		qp->ibqp.qp_num = 0;
		port->qp_smi_index = qpn;
		qp->attr.port_num = init->port_num;
		break;

	case IB_QPT_GSI:
		qp->ibqp.qp_num = 1;
		port->qp_gsi_index = qpn;
		qp->attr.port_num = init->port_num;
		break;

	default:
		qp->ibqp.qp_num = qpn;
		break;
	}

	INIT_LIST_HEAD(&qp->grp_list);

	skb_queue_head_init(&qp->send_pkts);

	spin_lock_init(&qp->grp_lock);
	spin_lock_init(&qp->state_lock);

	atomic_set(&qp->ssn, 0);
	atomic_set(&qp->skb_out, 0);
}

static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct ib_qp_init_attr *init, struct ib_udata *udata,
			   struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;

	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
	if (err < 0)
		return err;
	qp->sk->sk->sk_user_data = qp;

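	/* pick a source UDP port number for this QP from a hash of the
	 * source QPN, so that traffic from different QPs is spread
	 * across different NIC RX queues.
	 */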
	qp->src_port = RXE_ROCE_V2_SPORT +
		(hash_32_generic(qp_num(qp), 14) & 0x3fff);
	qp->sq.max_wr = init->cap.max_send_wr;

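	/* these caps were already limited by rxe_qp_chk_cap() in the caller */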
	wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
			 init->cap.max_inline_data);
	qp->sq.max_sge = init->cap.max_send_sge =
		wqe_size / sizeof(struct ib_sge);
	qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
	wqe_size += sizeof(struct rxe_send_wqe);

	qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr, wqe_size);
	if (!qp->sq.queue)
		return -ENOMEM;

	err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
			   qp->sq.queue->buf, qp->sq.queue->buf_size,
			   &qp->sq.queue->ip);

	if (err) {
		vfree(qp->sq.queue->buf);
		kfree(qp->sq.queue);
		return err;
	}

	qp->req.wqe_index = producer_index(qp->sq.queue);
	qp->req.state = QP_STATE_RESET;
	qp->req.opcode = -1;
	qp->comp.opcode = -1;

	spin_lock_init(&qp->sq.sq_lock);
	skb_queue_head_init(&qp->req_pkts);

	rxe_init_task(rxe, &qp->req.task, qp,
		      rxe_requester, "req");
	rxe_init_task(rxe, &qp->comp.task, qp,
		      rxe_completer, "comp");

	qp->qp_timeout_jiffies = 0;
	if (init->qp_type == IB_QPT_RC) {
		timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
		timer_setup(&qp->retrans_timer, retransmit_timer, 0);
	}
	return 0;
}

static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
			    struct ib_qp_init_attr *init,
			    struct ib_udata *udata,
			    struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;

	if (!qp->srq) {
		qp->rq.max_wr = init->cap.max_recv_wr;
		qp->rq.max_sge = init->cap.max_recv_sge;

		wqe_size = rcv_wqe_size(qp->rq.max_sge);

		pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
			 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);

		qp->rq.queue = rxe_queue_init(rxe,
					      &qp->rq.max_wr,
					      wqe_size);
		if (!qp->rq.queue)
			return -ENOMEM;

		err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
				   qp->rq.queue->buf, qp->rq.queue->buf_size,
				   &qp->rq.queue->ip);
		if (err) {
			vfree(qp->rq.queue->buf);
			kfree(qp->rq.queue);
			return err;
		}
	}

	spin_lock_init(&qp->rq.producer_lock);
	spin_lock_init(&qp->rq.consumer_lock);

	skb_queue_head_init(&qp->resp_pkts);

	rxe_init_task(rxe, &qp->resp.task, qp,
		      rxe_responder, "resp");

	qp->resp.opcode = OPCODE_NONE;
	qp->resp.msn = 0;
	qp->resp.state = QP_STATE_RESET;

	return 0;
}

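/* called by the create qp verb */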
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init,
		     struct rxe_create_qp_resp __user *uresp,
		     struct ib_pd *ibpd,
		     struct ib_udata *udata)
{
	int err;
	struct rxe_cq *rcq = to_rcq(init->recv_cq);
	struct rxe_cq *scq = to_rcq(init->send_cq);
	struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;

	rxe_add_ref(pd);
	rxe_add_ref(rcq);
	rxe_add_ref(scq);
	if (srq)
		rxe_add_ref(srq);

	qp->pd = pd;
	qp->rcq = rcq;
	qp->scq = scq;
	qp->srq = srq;

	rxe_qp_init_misc(rxe, qp, init);

	err = rxe_qp_init_req(rxe, qp, init, udata, uresp);
	if (err)
		goto err1;

	err = rxe_qp_init_resp(rxe, qp, init, udata, uresp);
	if (err)
		goto err2;

	qp->attr.qp_state = IB_QPS_RESET;
	qp->valid = 1;

	return 0;

err2:
	rxe_queue_cleanup(qp->sq.queue);
err1:
	if (srq)
		rxe_drop_ref(srq);
	rxe_drop_ref(scq);
	rxe_drop_ref(rcq);
	rxe_drop_ref(pd);

	return err;
}

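/* called by the query qp verb */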
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
	init->event_handler = qp->ibqp.event_handler;
	init->qp_context = qp->ibqp.qp_context;
	init->send_cq = qp->ibqp.send_cq;
	init->recv_cq = qp->ibqp.recv_cq;
	init->srq = qp->ibqp.srq;

	init->cap.max_send_wr = qp->sq.max_wr;
	init->cap.max_send_sge = qp->sq.max_sge;
	init->cap.max_inline_data = qp->sq.max_inline;

	if (!qp->srq) {
		init->cap.max_recv_wr = qp->rq.max_wr;
		init->cap.max_recv_sge = qp->rq.max_sge;
	}

	init->sq_sig_type = qp->sq_sig_type;

	init->qp_type = qp->ibqp.qp_type;
	init->port_num = 1;

	return 0;
}

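/* called by the modify qp verb; checks that the requested attributes are
 * valid against the device limits and the current qp state
 */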
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask)
{
	enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
				     attr->cur_qp_state : qp->attr.qp_state;
	enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
				     attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) {
		pr_warn("invalid mask or state for qp\n");
		goto err1;
	}

	if (mask & IB_QP_STATE) {
		if (cur_state == IB_QPS_SQD) {
			if (qp->req.state == QP_STATE_DRAIN &&
			    new_state != IB_QPS_ERR)
				goto err1;
		}
	}

	if (mask & IB_QP_PORT) {
		if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) {
			pr_warn("invalid port %d\n", attr->port_num);
			goto err1;
		}
	}

	if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
		goto err1;

	if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
		goto err1;

	if (mask & IB_QP_ALT_PATH) {
		if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
			goto err1;
		if (!rdma_is_port_valid(&rxe->ib_dev, attr->alt_port_num)) {
			pr_warn("invalid alt port %d\n", attr->alt_port_num);
			goto err1;
		}
		if (attr->alt_timeout > 31) {
			pr_warn("invalid QP alt timeout %d > 31\n",
				attr->alt_timeout);
			goto err1;
		}
	}

	if (mask & IB_QP_PATH_MTU) {
		struct rxe_port *port = &rxe->port;

		enum ib_mtu max_mtu = port->attr.max_mtu;
		enum ib_mtu mtu = attr->path_mtu;

		if (mtu > max_mtu) {
			pr_debug("invalid mtu (%d) > (%d)\n",
				 ib_mtu_enum_to_int(mtu),
				 ib_mtu_enum_to_int(max_mtu));
			goto err1;
		}
	}

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
			pr_warn("invalid max_rd_atomic %d > %d\n",
				attr->max_rd_atomic,
				rxe->attr.max_qp_rd_atom);
			goto err1;
		}
	}

	if (mask & IB_QP_TIMEOUT) {
		if (attr->timeout > 31) {
			pr_warn("invalid QP timeout %d > 31\n",
				attr->timeout);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

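/* move the qp to the reset state */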
static void rxe_qp_reset(struct rxe_qp *qp)
{
	/* stop tasks from running */
	rxe_disable_task(&qp->resp.task);

	/* stop request/comp */
	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_disable_task(&qp->comp.task);
		rxe_disable_task(&qp->req.task);
	}

	/* move qp to the reset state */
	qp->req.state = QP_STATE_RESET;
	qp->resp.state = QP_STATE_RESET;

	/* let the state machines run once to return to their
	 * initial state
	 */
	__rxe_do_task(&qp->resp.task);

	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
		rxe_queue_reset(qp->sq.queue);
	}

	/* cleanup attributes */
	atomic_set(&qp->ssn, 0);
	qp->req.opcode = -1;
	qp->req.need_retry = 0;
	qp->req.noack_pkts = 0;
	qp->resp.msn = 0;
	qp->resp.opcode = -1;
	qp->resp.drop_msg = 0;
	qp->resp.goto_error = 0;
	qp->resp.sent_psn_nak = 0;

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	cleanup_rd_atomic_resources(qp);

	/* reenable tasks */
	rxe_enable_task(&qp->resp.task);

	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_enable_task(&qp->comp.task);

		rxe_enable_task(&qp->req.task);
	}
}

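/* drain the send queue */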
static void rxe_qp_drain(struct rxe_qp *qp)
{
	if (qp->sq.queue) {
		if (qp->req.state != QP_STATE_DRAINED) {
			qp->req.state = QP_STATE_DRAIN;
			if (qp_type(qp) == IB_QPT_RC)
				rxe_run_task(&qp->comp.task, 1);
			else
				__rxe_do_task(&qp->comp.task);
			rxe_run_task(&qp->req.task, 1);
		}
	}
}

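/* move the qp to the error state */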
void rxe_qp_error(struct rxe_qp *qp)
{
	qp->req.state = QP_STATE_ERROR;
	qp->resp.state = QP_STATE_ERROR;
	qp->attr.qp_state = IB_QPS_ERR;

	/* drain work and packet queues */
	rxe_run_task(&qp->resp.task, 1);

	if (qp_type(qp) == IB_QPT_RC)
		rxe_run_task(&qp->comp.task, 1);
	else
		__rxe_do_task(&qp->comp.task);
	rxe_run_task(&qp->req.task, 1);
}

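/* called by the modify qp verb */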
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
	int err;

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		int max_rd_atomic = attr->max_rd_atomic ?
			roundup_pow_of_two(attr->max_rd_atomic) : 0;

		qp->attr.max_rd_atomic = max_rd_atomic;
		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
	}

	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		int max_dest_rd_atomic = attr->max_dest_rd_atomic ?
			roundup_pow_of_two(attr->max_dest_rd_atomic) : 0;

		qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

		free_rd_atomic_resources(qp);

		err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
		if (err)
			return err;
	}

	if (mask & IB_QP_CUR_STATE)
		qp->attr.cur_qp_state = attr->qp_state;

	if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

	if (mask & IB_QP_ACCESS_FLAGS)
		qp->attr.qp_access_flags = attr->qp_access_flags;

	if (mask & IB_QP_PKEY_INDEX)
		qp->attr.pkey_index = attr->pkey_index;

	if (mask & IB_QP_PORT)
		qp->attr.port_num = attr->port_num;

	if (mask & IB_QP_QKEY)
		qp->attr.qkey = attr->qkey;

	if (mask & IB_QP_AV)
		rxe_init_av(&attr->ah_attr, &qp->pri_av);

	if (mask & IB_QP_ALT_PATH) {
		rxe_init_av(&attr->alt_ah_attr, &qp->alt_av);
		qp->attr.alt_port_num = attr->alt_port_num;
		qp->attr.alt_pkey_index = attr->alt_pkey_index;
		qp->attr.alt_timeout = attr->alt_timeout;
	}

	if (mask & IB_QP_PATH_MTU) {
		qp->attr.path_mtu = attr->path_mtu;
		qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
	}

	if (mask & IB_QP_TIMEOUT) {
		qp->attr.timeout = attr->timeout;
		if (attr->timeout == 0) {
			qp->qp_timeout_jiffies = 0;
		} else {
			/* timeout is 4.096 us * 2^attr->timeout */
			int j = nsecs_to_jiffies(4096ULL << attr->timeout);

			qp->qp_timeout_jiffies = j ? j : 1;
		}
	}

	if (mask & IB_QP_RETRY_CNT) {
		qp->attr.retry_cnt = attr->retry_cnt;
		qp->comp.retry_cnt = attr->retry_cnt;
		pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
			 attr->retry_cnt);
	}

	if (mask & IB_QP_RNR_RETRY) {
		qp->attr.rnr_retry = attr->rnr_retry;
		qp->comp.rnr_retry = attr->rnr_retry;
		pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
			 attr->rnr_retry);
	}

	if (mask & IB_QP_RQ_PSN) {
		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
		qp->resp.psn = qp->attr.rq_psn;
		pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
			 qp->resp.psn);
	}

	if (mask & IB_QP_MIN_RNR_TIMER) {
		qp->attr.min_rnr_timer = attr->min_rnr_timer;
		pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
			 attr->min_rnr_timer);
	}

	if (mask & IB_QP_SQ_PSN) {
		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
		qp->req.psn = qp->attr.sq_psn;
		qp->comp.psn = qp->attr.sq_psn;
		pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
	}

	if (mask & IB_QP_PATH_MIG_STATE)
		qp->attr.path_mig_state = attr->path_mig_state;

	if (mask & IB_QP_DEST_QPN)
		qp->attr.dest_qp_num = attr->dest_qp_num;

	if (mask & IB_QP_STATE) {
		qp->attr.qp_state = attr->qp_state;

		switch (attr->qp_state) {
		case IB_QPS_RESET:
			pr_debug("qp#%d state -> RESET\n", qp_num(qp));
			rxe_qp_reset(qp);
			break;

		case IB_QPS_INIT:
			pr_debug("qp#%d state -> INIT\n", qp_num(qp));
			qp->req.state = QP_STATE_INIT;
			qp->resp.state = QP_STATE_INIT;
			break;

		case IB_QPS_RTR:
			pr_debug("qp#%d state -> RTR\n", qp_num(qp));
			qp->resp.state = QP_STATE_READY;
			break;

		case IB_QPS_RTS:
			pr_debug("qp#%d state -> RTS\n", qp_num(qp));
			qp->req.state = QP_STATE_READY;
			break;

		case IB_QPS_SQD:
			pr_debug("qp#%d state -> SQD\n", qp_num(qp));
			rxe_qp_drain(qp);
			break;

		case IB_QPS_SQE:
			pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
			break;

		case IB_QPS_ERR:
			pr_debug("qp#%d state -> ERR\n", qp_num(qp));
			rxe_qp_error(qp);
			break;
		}
	}

	return 0;
}

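/* called by the query qp verb */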
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
	*attr = qp->attr;

	attr->rq_psn = qp->resp.psn;
	attr->sq_psn = qp->req.psn;

	attr->cap.max_send_wr = qp->sq.max_wr;
	attr->cap.max_send_sge = qp->sq.max_sge;
	attr->cap.max_inline_data = qp->sq.max_inline;

	if (!qp->srq) {
		attr->cap.max_recv_wr = qp->rq.max_wr;
		attr->cap.max_recv_sge = qp->rq.max_sge;
	}

	rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
	rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);

	if (qp->req.state == QP_STATE_DRAIN) {
		attr->sq_draining = 1;
		/* applications that get this state typically spin on it.
		 * yield the processor
		 */
		cond_resched();
	} else {
		attr->sq_draining = 0;
	}

	pr_debug("attr->sq_draining = %d\n", attr->sq_draining);

	return 0;
}

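/* called by the destroy qp verb */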
void rxe_qp_destroy(struct rxe_qp *qp)
{
	qp->valid = 0;
	qp->qp_timeout_jiffies = 0;
	rxe_cleanup_task(&qp->resp.task);

	if (qp_type(qp) == IB_QPT_RC) {
		del_timer_sync(&qp->retrans_timer);
		del_timer_sync(&qp->rnr_nak_timer);
	}

	rxe_cleanup_task(&qp->req.task);
	rxe_cleanup_task(&qp->comp.task);

	/* flush out any outstanding work before the qp goes away */
	__rxe_do_task(&qp->req.task);
	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
	}
}

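/* called when the last reference to the qp is dropped */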
static void rxe_qp_do_cleanup(struct work_struct *work)
{
	struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);

	rxe_drop_all_mcast_groups(qp);

	if (qp->sq.queue)
		rxe_queue_cleanup(qp->sq.queue);

	if (qp->srq)
		rxe_drop_ref(qp->srq);

	if (qp->rq.queue)
		rxe_queue_cleanup(qp->rq.queue);

	if (qp->scq)
		rxe_drop_ref(qp->scq);
	if (qp->rcq)
		rxe_drop_ref(qp->rcq);
	if (qp->pd)
		rxe_drop_ref(qp->pd);

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	if (qp_type(qp) == IB_QPT_RC)
		sk_dst_reset(qp->sk->sk);

	free_rd_atomic_resources(qp);

	kernel_sock_shutdown(qp->sk, SHUT_RDWR);
	sock_release(qp->sk);
}

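/* called when the last reference to the qp is dropped */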
void rxe_qp_cleanup(struct rxe_pool_entry *arg)
{
	struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);

	execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}