#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/udp.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <linux/qed/qed_if.h>
#include <linux/qed/qed_rdma_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_roce_cm.h"

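/* Advance the software GSI consumer index of a queue, wrapping at max_wr. */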
void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info)
{
	info->gsi_cons = (info->gsi_cons + 1) % info->max_wr;
}

void qedr_store_gsi_qp_cq(struct qedr_dev *dev, struct qedr_qp *qp,
			  struct ib_qp_init_attr *attrs)
{
	dev->gsi_qp_created = 1;
	dev->gsi_sqcq = get_qedr_cq(attrs->send_cq);
	dev->gsi_rqcq = get_qedr_cq(attrs->recv_cq);
	dev->gsi_qp = qp;
}

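/* LL2 TX completion callback: frees the DMA-coherent UD header buffer and
 * the packet descriptor, advances the software SQ consumer and, if armed,
 * invokes the send CQ's completion handler.
 */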
static void qedr_ll2_complete_tx_packet(void *cxt, u8 connection_handle,
					void *cookie,
					dma_addr_t first_frag_addr,
					bool b_last_fragment,
					bool b_last_packet)
{
	struct qedr_dev *dev = (struct qedr_dev *)cxt;
	struct qed_roce_ll2_packet *pkt = cookie;
	struct qedr_cq *cq = dev->gsi_sqcq;
	struct qedr_qp *qp = dev->gsi_qp;
	unsigned long flags;

	DP_DEBUG(dev, QEDR_MSG_GSI,
		 "LL2 TX CB: gsi_sqcq=%p, gsi_rqcq=%p, gsi_cons=%d, ibcq_comp=%s\n",
		 dev->gsi_sqcq, dev->gsi_rqcq, qp->sq.gsi_cons,
		 cq->ibcq.comp_handler ? "Yes" : "No");

	dma_free_coherent(&dev->pdev->dev, pkt->header.len, pkt->header.vaddr,
			  pkt->header.baddr);
	kfree(pkt);

	spin_lock_irqsave(&qp->q_lock, flags);
	qedr_inc_sw_gsi_cons(&qp->sq);
	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (cq->ibcq.comp_handler)
		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
}

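/* LL2 RX completion callback: records the received length, VLAN and source
 * MAC (carried in the opaque data words) into the software RQ entry,
 * advances the software RQ consumer and notifies the recv CQ if armed.
 */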
static void qedr_ll2_complete_rx_packet(void *cxt,
					struct qed_ll2_comp_rx_data *data)
{
	struct qedr_dev *dev = (struct qedr_dev *)cxt;
	struct qedr_cq *cq = dev->gsi_rqcq;
	struct qedr_qp *qp = dev->gsi_qp;
	unsigned long flags;

	spin_lock_irqsave(&qp->q_lock, flags);

	qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ?
		-EINVAL : 0;
	qp->rqe_wr_id[qp->rq.gsi_cons].vlan = data->vlan;

	qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length =
		data->length.data_length;
	*((u32 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[0]) =
		ntohl(data->opaque_data_0);
	*((u16 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[4]) =
		ntohs((u16)data->opaque_data_1);

	qedr_inc_sw_gsi_cons(&qp->rq);

	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (cq->ibcq.comp_handler)
		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
}

static void qedr_ll2_release_rx_packet(void *cxt, u8 connection_handle,
				       void *cookie, dma_addr_t rx_buf_addr,
				       bool b_last_packet)
{
}

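/* Tear down the firmware objects of the GSI send/recv CQs. If both QP
 * attributes point at the same CQ, it is only destroyed once.
 */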
static void qedr_destroy_gsi_cq(struct qedr_dev *dev,
				struct ib_qp_init_attr *attrs)
{
	struct qed_rdma_destroy_cq_in_params iparams;
	struct qed_rdma_destroy_cq_out_params oparams;
	struct qedr_cq *cq;

	cq = get_qedr_cq(attrs->send_cq);
	iparams.icid = cq->icid;
	dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
	dev->ops->common->chain_free(dev->cdev, &cq->pbl);

	cq = get_qedr_cq(attrs->recv_cq);
	/* if a dedicated recv CQ was used, destroy it as well */
	if (iparams.icid != cq->icid) {
		iparams.icid = cq->icid;
		dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
		dev->ops->common->chain_free(dev->cdev, &cq->pbl);
	}
}

static inline int qedr_check_gsi_qp_attrs(struct qedr_dev *dev,
					  struct ib_qp_init_attr *attrs)
{
	if (attrs->cap.max_recv_sge > QEDR_GSI_MAX_RECV_SGE) {
		DP_ERR(dev,
		       "create gsi qp: failed. max_recv_sge is larger than the max %d>%d\n",
		       attrs->cap.max_recv_sge, QEDR_GSI_MAX_RECV_SGE);
		return -EINVAL;
	}

	if (attrs->cap.max_recv_wr > QEDR_GSI_MAX_RECV_WR) {
		DP_ERR(dev,
		       "create gsi qp: failed. max_recv_wr is too large %d>%d\n",
		       attrs->cap.max_recv_wr, QEDR_GSI_MAX_RECV_WR);
		return -EINVAL;
	}

	if (attrs->cap.max_send_wr > QEDR_GSI_MAX_SEND_WR) {
		DP_ERR(dev,
		       "create gsi qp: failed. max_send_wr is too large %d>%d\n",
		       attrs->cap.max_send_wr, QEDR_GSI_MAX_SEND_WR);
		return -EINVAL;
	}

	return 0;
}

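/* Post a GSI packet on the LL2 connection: the UD header is sent as the
 * first fragment (BD) and each SGE of the WR as an additional fragment.
 */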
static int qedr_ll2_post_tx(struct qedr_dev *dev,
			    struct qed_roce_ll2_packet *pkt)
{
	enum qed_ll2_roce_flavor_type roce_flavor;
	struct qed_ll2_tx_pkt_info ll2_tx_pkt;
	int rc;
	int i;

	memset(&ll2_tx_pkt, 0, sizeof(ll2_tx_pkt));

	roce_flavor = (pkt->roce_mode == ROCE_V1) ?
		      QED_LL2_ROCE : QED_LL2_RROCE;

	if (pkt->roce_mode == ROCE_V2_IPV4)
		ll2_tx_pkt.enable_ip_cksum = 1;

	ll2_tx_pkt.num_of_bds = 1 /* hdr */ + pkt->n_seg;
	ll2_tx_pkt.vlan = 0;
	ll2_tx_pkt.tx_dest = pkt->tx_dest;
	ll2_tx_pkt.qed_roce_flavor = roce_flavor;
	ll2_tx_pkt.first_frag = pkt->header.baddr;
	ll2_tx_pkt.first_frag_len = pkt->header.len;
	ll2_tx_pkt.cookie = pkt;

	/* tx header */
	rc = dev->ops->ll2_prepare_tx_packet(dev->rdma_ctx,
					     dev->gsi_ll2_handle,
					     &ll2_tx_pkt, 1);
	if (rc) {
		/* TX failed while posting the header - release resources */
		dma_free_coherent(&dev->pdev->dev, pkt->header.len,
				  pkt->header.vaddr, pkt->header.baddr);
		kfree(pkt);

		DP_ERR(dev, "roce ll2 tx: header failed (rc=%d)\n", rc);
		return rc;
	}

	/* tx payload */
	for (i = 0; i < pkt->n_seg; i++) {
		rc = dev->ops->ll2_set_fragment_of_tx_packet(
			dev->rdma_ctx,
			dev->gsi_ll2_handle,
			pkt->payload[i].baddr,
			pkt->payload[i].len);

		if (rc) {
			/* If this fails there is little to do here: part of
			 * the packet has already been posted, so the memory
			 * cannot be freed until the TX completion arrives.
			 */
			DP_ERR(dev, "ll2 tx: payload failed (rc=%d)\n", rc);
			return rc;
		}
	}

	return 0;
}

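/* Stop the GSI LL2 connection: drop the MAC filter, terminate and release
 * the connection, and mark the handle unused.
 */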
static int qedr_ll2_stop(struct qedr_dev *dev)
{
	int rc;

	if (dev->gsi_ll2_handle == QED_LL2_UNUSED_HANDLE)
		return 0;

	/* remove the LL2 MAC address filter */
	rc = dev->ops->ll2_set_mac_filter(dev->cdev,
					  dev->gsi_ll2_mac_address, NULL);

	rc = dev->ops->ll2_terminate_connection(dev->rdma_ctx,
						dev->gsi_ll2_handle);
	if (rc)
		DP_ERR(dev, "Failed to terminate LL2 connection (rc=%d)\n", rc);

	dev->ops->ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle);

	dev->gsi_ll2_handle = QED_LL2_UNUSED_HANDLE;

	return rc;
}

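/* Acquire and establish the LL2 connection used for GSI traffic and add a
 * MAC filter for the netdev address. RX/TX descriptor counts are taken
 * from the QP's max_recv_wr/max_send_wr.
 */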
static int qedr_ll2_start(struct qedr_dev *dev,
			  struct ib_qp_init_attr *attrs, struct qedr_qp *qp)
{
	struct qed_ll2_acquire_data data;
	struct qed_ll2_cbs cbs;
	int rc;

	cbs.rx_comp_cb = qedr_ll2_complete_rx_packet;
	cbs.tx_comp_cb = qedr_ll2_complete_tx_packet;
	cbs.rx_release_cb = qedr_ll2_release_rx_packet;
	cbs.tx_release_cb = qedr_ll2_complete_tx_packet;
	cbs.cookie = dev;

	memset(&data, 0, sizeof(data));
	data.input.conn_type = QED_LL2_TYPE_ROCE;
	data.input.mtu = dev->ndev->mtu;
	data.input.rx_num_desc = attrs->cap.max_recv_wr;
	data.input.rx_drop_ttl0_flg = true;
	data.input.rx_vlan_removal_en = false;
	data.input.tx_num_desc = attrs->cap.max_send_wr;
	data.input.tx_tc = 0;
	data.input.tx_dest = QED_LL2_TX_DEST_NW;
	data.input.ai_err_packet_too_big = QED_LL2_DROP_PACKET;
	data.input.ai_err_no_buf = QED_LL2_DROP_PACKET;
	data.input.gsi_enable = 1;
	data.p_connection_handle = &dev->gsi_ll2_handle;
	data.cbs = &cbs;

	rc = dev->ops->ll2_acquire_connection(dev->rdma_ctx, &data);
	if (rc) {
		DP_ERR(dev,
		       "ll2 start: failed to acquire LL2 connection (rc=%d)\n",
		       rc);
		return rc;
	}

	rc = dev->ops->ll2_establish_connection(dev->rdma_ctx,
						dev->gsi_ll2_handle);
	if (rc) {
		DP_ERR(dev,
		       "ll2 start: failed to establish LL2 connection (rc=%d)\n",
		       rc);
		goto err1;
	}

	rc = dev->ops->ll2_set_mac_filter(dev->cdev, NULL, dev->ndev->dev_addr);
	if (rc)
		goto err2;

	return 0;

err2:
	dev->ops->ll2_terminate_connection(dev->rdma_ctx, dev->gsi_ll2_handle);
err1:
	dev->ops->ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle);

	return rc;
}

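/* Create the special GSI QP (QP1): validate the requested capabilities,
 * start the LL2 connection, allocate the software SQ/RQ shadow arrays and
 * take over the GSI CQs from the firmware.
 */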
int qedr_create_gsi_qp(struct qedr_dev *dev, struct ib_qp_init_attr *attrs,
		       struct qedr_qp *qp)
{
	int rc;

	rc = qedr_check_gsi_qp_attrs(dev, attrs);
	if (rc)
		return rc;

	rc = qedr_ll2_start(dev, attrs, qp);
	if (rc) {
		DP_ERR(dev, "create gsi qp: failed on ll2 start. rc=%d\n", rc);
		return rc;
	}

	/* create QP */
	qp->ibqp.qp_num = 1;
	qp->rq.max_wr = attrs->cap.max_recv_wr;
	qp->sq.max_wr = attrs->cap.max_send_wr;

	qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
				GFP_KERNEL);
	if (!qp->rqe_wr_id)
		goto err;
	qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
				GFP_KERNEL);
	if (!qp->wqe_wr_id)
		goto err;

	qedr_store_gsi_qp_cq(dev, qp, attrs);
	ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);

	/* the GSI CQs are handled by the driver, so remove them from the FW */
	qedr_destroy_gsi_cq(dev, attrs);
	dev->gsi_rqcq->cq_type = QEDR_CQ_TYPE_GSI;
	dev->gsi_sqcq->cq_type = QEDR_CQ_TYPE_GSI;

	DP_DEBUG(dev, QEDR_MSG_GSI, "created GSI QP %p\n", qp);

	return 0;

err:
	kfree(qp->rqe_wr_id);

	rc = qedr_ll2_stop(dev);
	if (rc)
		DP_ERR(dev, "create gsi qp: failed destroy on create\n");

	return -ENOMEM;
}

int qedr_destroy_gsi_qp(struct qedr_dev *dev)
{
	return qedr_ll2_stop(dev);
}

#define QEDR_MAX_UD_HEADER_SIZE	(100)
#define QEDR_GSI_QPN		(1)
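/* Build the UD header for a GSI send: Ethernet (optionally VLAN-tagged),
 * then a GRH for RoCE v1/v2-IPv6 or an IPv4+UDP header for RoCE v2-IPv4,
 * followed by BTH and DETH. The RoCE flavor is derived from the source
 * GID type.
 */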
static inline int qedr_gsi_build_header(struct qedr_dev *dev,
					struct qedr_qp *qp,
					const struct ib_send_wr *swr,
					struct ib_ud_header *udh,
					int *roce_mode)
{
	bool has_vlan = false, has_grh_ipv6 = true;
	struct rdma_ah_attr *ah_attr = &get_qedr_ah(ud_wr(swr)->ah)->attr;
	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
	const struct ib_gid_attr *sgid_attr = grh->sgid_attr;
	int send_size = 0;
	u16 vlan_id = 0;
	u16 ether_type;
	int rc;
	int ip_ver = 0;
	bool has_udp = false;
	int i;

	rc = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, NULL);
	if (rc)
		return rc;

	if (vlan_id < VLAN_CFI_MASK)
		has_vlan = true;

	send_size = 0;
	for (i = 0; i < swr->num_sge; ++i)
		send_size += swr->sg_list[i].length;

	has_udp = (sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP);
	if (!has_udp) {
		/* RoCE v1 */
		ether_type = ETH_P_IBOE;
		*roce_mode = ROCE_V1;
	} else if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) {
		/* RoCE v2 IPv4 */
		ip_ver = 4;
		ether_type = ETH_P_IP;
		has_grh_ipv6 = false;
		*roce_mode = ROCE_V2_IPV4;
	} else {
		/* RoCE v2 IPv6 */
		ip_ver = 6;
		ether_type = ETH_P_IPV6;
		*roce_mode = ROCE_V2_IPV6;
	}

	rc = ib_ud_header_init(send_size, false, true, has_vlan,
			       has_grh_ipv6, ip_ver, has_udp, 0, udh);
	if (rc) {
		DP_ERR(dev, "gsi post send: failed to init header\n");
		return rc;
	}

	/* ENET + VLAN headers */
	ether_addr_copy(udh->eth.dmac_h, ah_attr->roce.dmac);
	ether_addr_copy(udh->eth.smac_h, dev->ndev->dev_addr);
	if (has_vlan) {
		udh->eth.type = htons(ETH_P_8021Q);
		udh->vlan.tag = htons(vlan_id);
		udh->vlan.type = htons(ether_type);
	} else {
		udh->eth.type = htons(ether_type);
	}

	/* BTH */
	udh->bth.solicited_event = !!(swr->send_flags & IB_SEND_SOLICITED);
	udh->bth.pkey = QEDR_ROCE_PKEY_DEFAULT;
	udh->bth.destination_qpn = htonl(ud_wr(swr)->remote_qpn);
	udh->bth.psn = htonl((qp->sq_psn++) & ((1 << 24) - 1));
	udh->bth.opcode = IB_OPCODE_UD_SEND_ONLY;

	/* DETH */
	udh->deth.qkey = htonl(0x80010000);	/* GSI well-known QKey */
	udh->deth.source_qpn = htonl(QEDR_GSI_QPN);

	if (has_grh_ipv6) {
		/* GRH */
		udh->grh.traffic_class = grh->traffic_class;
		udh->grh.flow_label = grh->flow_label;
		udh->grh.hop_limit = grh->hop_limit;
		udh->grh.destination_gid = grh->dgid;
		memcpy(&udh->grh.source_gid.raw, sgid_attr->gid.raw,
		       sizeof(udh->grh.source_gid.raw));
	} else {
		/* IPv4 header */
		u32 ipv4_addr;

		udh->ip4.protocol = IPPROTO_UDP;
		udh->ip4.tos = htonl(grh->flow_label);
		udh->ip4.frag_off = htons(IP_DF);
		udh->ip4.ttl = grh->hop_limit;

		ipv4_addr = qedr_get_ipv4_from_gid(sgid_attr->gid.raw);
		udh->ip4.saddr = ipv4_addr;
		ipv4_addr = qedr_get_ipv4_from_gid(grh->dgid.raw);
		udh->ip4.daddr = ipv4_addr;
		/* IPv4 checksum is offloaded (enable_ip_cksum in ll2 tx) */
	}

	/* UDP */
	if (has_udp) {
		udh->udp.sport = htons(QEDR_ROCE_V2_UDP_SPORT);
		udh->udp.dport = htons(ROCE_V2_UDP_DPORT);
		udh->udp.csum = 0;	/* RoCE v2 allows a zero UDP checksum */
	}
	return 0;
}

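/* Build an LL2 packet for a GSI send WR: pack the UD header into a
 * DMA-coherent buffer and reference the WR's SGEs as payload fragments.
 * Loopback is selected when the destination MAC equals our own.
 */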
static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
					struct qedr_qp *qp,
					const struct ib_send_wr *swr,
					struct qed_roce_ll2_packet **p_packet)
{
	u8 ud_header_buffer[QEDR_MAX_UD_HEADER_SIZE];
	struct qed_roce_ll2_packet *packet;
	struct pci_dev *pdev = dev->pdev;
	int roce_mode, header_size;
	struct ib_ud_header udh;
	int i, rc;

	*p_packet = NULL;

	rc = qedr_gsi_build_header(dev, qp, swr, &udh, &roce_mode);
	if (rc)
		return rc;

	header_size = ib_ud_header_pack(&udh, &ud_header_buffer);

	packet = kzalloc(sizeof(*packet), GFP_ATOMIC);
	if (!packet)
		return -ENOMEM;

	packet->header.vaddr = dma_alloc_coherent(&pdev->dev, header_size,
						  &packet->header.baddr,
						  GFP_ATOMIC);
	if (!packet->header.vaddr) {
		kfree(packet);
		return -ENOMEM;
	}

	if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h))
		packet->tx_dest = QED_LL2_TX_DEST_LB;
	else
		packet->tx_dest = QED_LL2_TX_DEST_NW;

	packet->roce_mode = roce_mode;
	memcpy(packet->header.vaddr, ud_header_buffer, header_size);
	packet->header.len = header_size;
	packet->n_seg = swr->num_sge;
	for (i = 0; i < packet->n_seg; i++) {
		packet->payload[i].baddr = swr->sg_list[i].addr;
		packet->payload[i].len = swr->sg_list[i].length;
	}

	*p_packet = packet;

	return 0;
}

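/* Post a send WR on the GSI QP. Only a single IB_WR_SEND WR may be posted
 * per call; the packet is handed to LL2 and the wr_id is stored in the
 * software SQ for completion reporting.
 */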
int qedr_gsi_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		       const struct ib_send_wr **bad_wr)
{
	struct qed_roce_ll2_packet *pkt = NULL;
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	struct qedr_dev *dev = qp->dev;
	unsigned long flags;
	int rc;

	if (qp->state != QED_ROCE_QP_STATE_RTS) {
		*bad_wr = wr;
		DP_ERR(dev,
		       "gsi post send: failed to post SQ WR. state is %d and not QED_ROCE_QP_STATE_RTS\n",
		       qp->state);
		return -EINVAL;
	}

	if (wr->num_sge > RDMA_MAX_SGE_PER_SQ_WQE) {
		DP_ERR(dev, "gsi post send: num_sge is too large (%d>%d)\n",
		       wr->num_sge, RDMA_MAX_SGE_PER_SQ_WQE);
		rc = -EINVAL;
		goto err;
	}

	if (wr->opcode != IB_WR_SEND) {
		DP_ERR(dev,
		       "gsi post send: failed due to unsupported opcode %d\n",
		       wr->opcode);
		rc = -EINVAL;
		goto err;
	}

	spin_lock_irqsave(&qp->q_lock, flags);

	rc = qedr_gsi_build_packet(dev, qp, wr, &pkt);
	if (rc) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		goto err;
	}

	rc = qedr_ll2_post_tx(dev, pkt);
	if (!rc) {
		qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
		qedr_inc_sw_prod(&qp->sq);
		DP_DEBUG(qp->dev, QEDR_MSG_GSI,
			 "gsi post send: opcode=%d, wr_id=%llx\n", wr->opcode,
			 wr->wr_id);
	} else {
		DP_ERR(dev, "gsi post send: failed to transmit (rc=%d)\n", rc);
		rc = -EAGAIN;
		*bad_wr = wr;
	}

	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (wr->next) {
		DP_ERR(dev,
		       "gsi post send: failed second WR. Only one WR may be passed at a time\n");
		*bad_wr = wr->next;
		rc = -EINVAL;
	}

	return rc;

err:
	*bad_wr = wr;
	return rc;
}

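/* Post receive WRs on the GSI QP: each buffer is handed to LL2 and its
 * wr_id and first SGE are stored in the software RQ for completion
 * reporting.
 */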
int qedr_gsi_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		       const struct ib_recv_wr **bad_wr)
{
	struct qedr_dev *dev = get_qedr_dev(ibqp->device);
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	unsigned long flags;
	int rc = 0;

	if ((qp->state != QED_ROCE_QP_STATE_RTR) &&
	    (qp->state != QED_ROCE_QP_STATE_RTS)) {
		*bad_wr = wr;
		DP_ERR(dev,
		       "gsi post recv: failed to post rx buffer. state is %d and not QED_ROCE_QP_STATE_RTR/S\n",
		       qp->state);
		return -EINVAL;
	}

	spin_lock_irqsave(&qp->q_lock, flags);

	while (wr) {
		if (wr->num_sge > QEDR_GSI_MAX_RECV_SGE) {
			DP_ERR(dev,
			       "gsi post recv: failed to post rx buffer. too many sges %d>%d\n",
			       wr->num_sge, QEDR_GSI_MAX_RECV_SGE);
			goto err;
		}

		rc = dev->ops->ll2_post_rx_buffer(dev->rdma_ctx,
						  dev->gsi_ll2_handle,
						  wr->sg_list[0].addr,
						  wr->sg_list[0].length,
						  NULL /* cookie */,
						  1 /* notify_fw */);
		if (rc) {
			DP_ERR(dev,
			       "gsi post recv: failed to post rx buffer (rc=%d)\n",
			       rc);
			goto err;
		}

		memset(&qp->rqe_wr_id[qp->rq.prod], 0,
		       sizeof(qp->rqe_wr_id[qp->rq.prod]));
		qp->rqe_wr_id[qp->rq.prod].sg_list[0] = wr->sg_list[0];
		qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;

		qedr_inc_sw_prod(&qp->rq);

		wr = wr->next;
	}

	spin_unlock_irqrestore(&qp->q_lock, flags);

	return rc;
err:
	spin_unlock_irqrestore(&qp->q_lock, flags);
	*bad_wr = wr;
	return -ENOMEM;
}

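/* Poll the software-managed GSI CQ: drain receive completions first, then
 * send completions, up to num_entries.
 */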
int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
	struct qedr_cq *cq = get_qedr_cq(ibcq);
	struct qedr_qp *qp = dev->gsi_qp;
	unsigned long flags;
	u16 vlan_id;
	int i = 0;

	spin_lock_irqsave(&cq->cq_lock, flags);

	while (i < num_entries && qp->rq.cons != qp->rq.gsi_cons) {
		memset(&wc[i], 0, sizeof(*wc));

		wc[i].qp = &qp->ibqp;
		wc[i].wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
		wc[i].opcode = IB_WC_RECV;
		wc[i].pkey_index = 0;
		wc[i].status = (qp->rqe_wr_id[qp->rq.cons].rc) ?
			       IB_WC_GENERAL_ERR : IB_WC_SUCCESS;
		/* only the first recv SGE is used for GSI buffers */
		wc[i].byte_len = qp->rqe_wr_id[qp->rq.cons].sg_list[0].length;
		wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK;
		ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac);
		wc[i].wc_flags |= IB_WC_WITH_SMAC;

		vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan &
			  VLAN_VID_MASK;
		if (vlan_id) {
			wc[i].wc_flags |= IB_WC_WITH_VLAN;
			wc[i].vlan_id = vlan_id;
			wc[i].sl = (qp->rqe_wr_id[qp->rq.cons].vlan &
				    VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		}

		qedr_inc_sw_cons(&qp->rq);
		i++;
	}

	while (i < num_entries && qp->sq.cons != qp->sq.gsi_cons) {
		memset(&wc[i], 0, sizeof(*wc));

		wc[i].qp = &qp->ibqp;
		wc[i].wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
		wc[i].opcode = IB_WC_SEND;
		wc[i].status = IB_WC_SUCCESS;

		qedr_inc_sw_cons(&qp->sq);
		i++;
	}

	spin_unlock_irqrestore(&cq->cq_lock, flags);

	DP_DEBUG(dev, QEDR_MSG_GSI,
		 "gsi poll_cq: requested entries=%d, actual=%d, qp->rq.cons=%d, qp->rq.gsi_cons=%x, qp->sq.cons=%d, qp->sq.gsi_cons=%d, qp_num=%d\n",
		 num_entries, i, qp->rq.cons, qp->rq.gsi_cons, qp->sq.cons,
		 qp->sq.gsi_cons, qp->ibqp.qp_num);

	return i;
}