1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32#include "iwch_provider.h"
33#include "iwch.h"
34
35
36
37
38
39
40
41
42
43
/*
 * iwch_poll_cq_one - translate the next hardware CQE on @chp into an
 * ib_wc work completion.
 *
 * @rhp: iwch device owning the CQ
 * @chp: completion queue being polled
 * @wc:  output work completion; filled in only when 1 is returned
 *
 * Return:
 *   0        CQ is empty (no CQE pending)
 *   1        one completion copied into @wc
 *   -EAGAIN  a CQE was consumed but produced no completion for the
 *            caller; the caller should poll again
 *   -EINVAL  the CQE carried an unexpected opcode or status
 *
 * Called with chp->lock held (see iwch_poll_cq). Additionally takes
 * the owning QP's lock, when the QP still exists, for the duration of
 * the CQE consumption and translation.
 */
static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
			    struct ib_wc *wc)
{
	struct iwch_qp *qhp = NULL;
	struct t3_cqe cqe, *rd_cqe;
	struct t3_wq *wq;
	u32 credit = 0;
	u8 cqe_flushed;
	u64 cookie;
	int ret = 1;

	/* Peek at the next CQE without consuming it; NULL = CQ empty. */
	rd_cqe = cxio_next_cqe(&chp->cq);

	if (!rd_cqe)
		return 0;

	/*
	 * The QP named by the CQE may already have been destroyed; in
	 * that case poll with a NULL WQ so the CQE is still consumed.
	 */
	qhp = get_qhp(rhp, CQE_QPID(*rd_cqe));
	if (!qhp)
		wq = NULL;
	else {
		spin_lock(&qhp->lock);
		wq = &(qhp->wq);
	}
	ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
			   &credit);
	/*
	 * NOTE(review): presumably T3A parts need CQ credits returned
	 * explicitly here while later silicon handles this elsewhere —
	 * confirm against the cxio HAL.
	 */
	if (t3a_device(chp->rhp) && credit) {
		PDBG("%s updating %d cq credits on id %d\n", __FUNCTION__,
		     credit, chp->cq.cqid);
		cxio_hal_cq_op(&rhp->rdev, &chp->cq, CQ_CREDIT_UPDATE, credit);
	}

	/*
	 * Nonzero from cxio_poll_cq means the CQE yielded no completion
	 * for the caller (e.g. it belonged to an unknown QP); report
	 * -EAGAIN so iwch_poll_cq retries for the same wc slot.
	 */
	if (ret) {
		ret = -EAGAIN;
		goto out;
	}
	ret = 1;

	/*
	 * NOTE(review): assumes cxio_poll_cq never returns 0 when wq is
	 * NULL; otherwise qhp is NULL here and &qhp->ibqp below would
	 * dereference NULL — confirm in cxio_poll_cq.
	 */
	wc->wr_id = cookie;
	wc->qp = &qhp->ibqp;
	wc->vendor_err = CQE_STATUS(cqe);

	PDBG("%s qpid 0x%x type %d opcode %d status 0x%x wrid hi 0x%x "
	     "lo 0x%x cookie 0x%llx\n", __FUNCTION__,
	     CQE_QPID(cqe), CQE_TYPE(cqe),
	     CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe),
	     CQE_WRID_LOW(cqe), (unsigned long long) cookie);

	if (CQE_TYPE(cqe) == 0) {
		/* Receive completion: byte_len is valid only on success. */
		if (!CQE_STATUS(cqe))
			wc->byte_len = CQE_LEN(cqe);
		else
			wc->byte_len = 0;
		wc->opcode = IB_WC_RECV;
	} else {
		/* Send-side completion: map T3 opcode to verbs opcode. */
		switch (CQE_OPCODE(cqe)) {
		case T3_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case T3_READ_REQ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = CQE_LEN(cqe);
			break;
		case T3_SEND:
		case T3_SEND_WITH_SE:
			wc->opcode = IB_WC_SEND;
			break;
		case T3_BIND_MW:
			wc->opcode = IB_WC_BIND_MW;
			break;

		/*
		 * These opcodes are listed explicitly but handled as
		 * errors: a CQE carrying one of them is unexpected here.
		 */
		case T3_SEND_WITH_INV:
		case T3_SEND_WITH_SE_INV:
		case T3_LOCAL_INV:
		case T3_FAST_REGISTER:
		default:
			printk(KERN_ERR MOD "Unexpected opcode %d "
			       "in the CQE received for QPID=0x%0x\n",
			       CQE_OPCODE(cqe), CQE_QPID(cqe));
			ret = -EINVAL;
			goto out;
		}
	}

	if (cqe_flushed)
		wc->status = IB_WC_WR_FLUSH_ERR;
	else {
		/* Map the T3 TPT error code to a verbs completion status. */
		switch (CQE_STATUS(cqe)) {
		case TPT_ERR_SUCCESS:
			wc->status = IB_WC_SUCCESS;
			break;
		case TPT_ERR_STAG:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case TPT_ERR_PDID:
			wc->status = IB_WC_LOC_PROT_ERR;
			break;
		case TPT_ERR_QPID:
		case TPT_ERR_ACCESS:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case TPT_ERR_WRAP:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		case TPT_ERR_BOUND:
			wc->status = IB_WC_LOC_LEN_ERR;
			break;
		case TPT_ERR_INVALIDATE_SHARED_MR:
		case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
			wc->status = IB_WC_MW_BIND_ERR;
			break;
		case TPT_ERR_CRC:
		case TPT_ERR_MARKER:
		case TPT_ERR_PDU_LEN_ERR:
		case TPT_ERR_OUT_OF_RQE:
		case TPT_ERR_DDP_VERSION:
		case TPT_ERR_RDMA_VERSION:
		case TPT_ERR_DDP_QUEUE_NUM:
		case TPT_ERR_MSN:
		case TPT_ERR_TBIT:
		case TPT_ERR_MO:
		case TPT_ERR_MSN_RANGE:
		case TPT_ERR_IRD_OVERFLOW:
		case TPT_ERR_OPCODE:
			wc->status = IB_WC_FATAL_ERR;
			break;
		case TPT_ERR_SWFLUSH:
			wc->status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
			printk(KERN_ERR MOD "Unexpected cqe_status 0x%x for "
			       "QPID=0x%0x\n", CQE_STATUS(cqe), CQE_QPID(cqe));
			ret = -EINVAL;
		}
	}
out:
	/* wq is non-NULL exactly when qhp->lock was taken above. */
	if (wq)
		spin_unlock(&qhp->lock);
	return ret;
}
185
186int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
187{
188 struct iwch_dev *rhp;
189 struct iwch_cq *chp;
190 unsigned long flags;
191 int npolled;
192 int err = 0;
193
194 chp = to_iwch_cq(ibcq);
195 rhp = chp->rhp;
196
197 spin_lock_irqsave(&chp->lock, flags);
198 for (npolled = 0; npolled < num_entries; ++npolled) {
199#ifdef DEBUG
200 int i=0;
201#endif
202
203
204
205
206
207
208 do {
209 err = iwch_poll_cq_one(rhp, chp, wc + npolled);
210#ifdef DEBUG
211 BUG_ON(++i > 1000);
212#endif
213 } while (err == -EAGAIN);
214 if (err <= 0)
215 break;
216 }
217 spin_unlock_irqrestore(&chp->lock, flags);
218
219 if (err < 0)
220 return err;
221 else {
222 return npolled;
223 }
224}
225