3#include "osdep.h"
4#include "status.h"
5#include "hmc.h"
6#include "defs.h"
7#include "type.h"
8#include "protos.h"
9#include "puda.h"
10#include "ws.h"
11
12static void irdma_ieq_receive(struct irdma_sc_vsi *vsi,
13 struct irdma_puda_buf *buf);
14static void irdma_ieq_tx_compl(struct irdma_sc_vsi *vsi, void *sqwrid);
15static void irdma_ilq_putback_rcvbuf(struct irdma_sc_qp *qp,
16 struct irdma_puda_buf *buf, u32 wqe_idx);
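
/**
 * irdma_puda_get_listbuf - get buffer from puda list
 * @list: list to use for buffers (ILQ or IEQ)
 */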
21static struct irdma_puda_buf *irdma_puda_get_listbuf(struct list_head *list)
22{
23 struct irdma_puda_buf *buf = NULL;
24
25 if (!list_empty(list)) {
26 buf = (struct irdma_puda_buf *)list->next;
27 list_del((struct list_head *)&buf->list);
28 }
29
30 return buf;
31}
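
/**
 * irdma_puda_get_bufpool - take a buffer from the resource buffer pool
 * @rsrc: resource to use for buffer
 */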
37struct irdma_puda_buf *irdma_puda_get_bufpool(struct irdma_puda_rsrc *rsrc)
38{
39 struct irdma_puda_buf *buf = NULL;
40 struct list_head *list = &rsrc->bufpool;
41 unsigned long flags;
42
43 spin_lock_irqsave(&rsrc->bufpool_lock, flags);
44 buf = irdma_puda_get_listbuf(list);
45 if (buf) {
46 rsrc->avail_buf_count--;
47 buf->vsi = rsrc->vsi;
48 } else {
49 rsrc->stats_buf_alloc_fail++;
50 }
51 spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
52
53 return buf;
54}
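
/**
 * irdma_puda_ret_bufpool - return buffer to resource buffer pool
 * @rsrc: resource to use for buffer
 * @buf: buffer to return to the pool
 */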
61void irdma_puda_ret_bufpool(struct irdma_puda_rsrc *rsrc,
62 struct irdma_puda_buf *buf)
63{
64 unsigned long flags;
65
66 buf->do_lpb = false;
67 spin_lock_irqsave(&rsrc->bufpool_lock, flags);
68 list_add(&buf->list, &rsrc->bufpool);
69 spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
70 rsrc->avail_buf_count++;
71}
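
/**
 * irdma_puda_post_recvbuf - set wqe for rcv buffer
 * @rsrc: resource ptr
 * @wqe_idx: wqe index to use
 * @buf: puda buffer for rcv q
 * @initial: flag if during init time
 */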
80static void irdma_puda_post_recvbuf(struct irdma_puda_rsrc *rsrc, u32 wqe_idx,
81 struct irdma_puda_buf *buf, bool initial)
82{
83 __le64 *wqe;
84 struct irdma_sc_qp *qp = &rsrc->qp;
85 u64 offset24 = 0;
86
87
88 dma_sync_single_for_device(rsrc->dev->hw->device, buf->mem.pa,
89 buf->mem.size, DMA_BIDIRECTIONAL);
90 qp->qp_uk.rq_wrid_array[wqe_idx] = (uintptr_t)buf;
91 wqe = qp->qp_uk.rq_base[wqe_idx].elem;
92 if (!initial)
93 get_64bit_val(wqe, 24, &offset24);
94
95 offset24 = (offset24) ? 0 : FIELD_PREP(IRDMAQPSQ_VALID, 1);
96
97 set_64bit_val(wqe, 16, 0);
98 set_64bit_val(wqe, 0, buf->mem.pa);
99 if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
100 set_64bit_val(wqe, 8,
101 FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, buf->mem.size));
102 } else {
103 set_64bit_val(wqe, 8,
104 FIELD_PREP(IRDMAQPSQ_FRAG_LEN, buf->mem.size) |
105 offset24);
106 }
107 dma_wmb();
108
109 set_64bit_val(wqe, 24, offset24);
110}
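
/**
 * irdma_puda_replenish_rq - post receive buffers for invalidated RQ wqes
 * @rsrc: resource to use for buffer
 * @initial: flag if during init time
 */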
117static enum irdma_status_code
118irdma_puda_replenish_rq(struct irdma_puda_rsrc *rsrc, bool initial)
119{
120 u32 i;
121 u32 invalid_cnt = rsrc->rxq_invalid_cnt;
122 struct irdma_puda_buf *buf = NULL;
123
124 for (i = 0; i < invalid_cnt; i++) {
125 buf = irdma_puda_get_bufpool(rsrc);
126 if (!buf)
127 return IRDMA_ERR_list_empty;
128 irdma_puda_post_recvbuf(rsrc, rsrc->rx_wqe_idx, buf, initial);
129 rsrc->rx_wqe_idx = ((rsrc->rx_wqe_idx + 1) % rsrc->rq_size);
130 rsrc->rxq_invalid_cnt--;
131 }
132
133 return 0;
134}
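
/**
 * irdma_puda_alloc_buf - allocate memory for a buffer and map it for DMA
 * @dev: hardware device
 * @len: length of buffer
 */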
141static struct irdma_puda_buf *irdma_puda_alloc_buf(struct irdma_sc_dev *dev,
142 u32 len)
143{
144 struct irdma_puda_buf *buf;
145 struct irdma_virt_mem buf_mem;
146
147 buf_mem.size = sizeof(struct irdma_puda_buf);
148 buf_mem.va = kzalloc(buf_mem.size, GFP_KERNEL);
149 if (!buf_mem.va)
150 return NULL;
151
152 buf = buf_mem.va;
153 buf->mem.size = len;
154 buf->mem.va = kzalloc(buf->mem.size, GFP_KERNEL);
155 if (!buf->mem.va)
156 goto free_virt;
157 buf->mem.pa = dma_map_single(dev->hw->device, buf->mem.va,
158 buf->mem.size, DMA_BIDIRECTIONAL);
159 if (dma_mapping_error(dev->hw->device, buf->mem.pa)) {
160 kfree(buf->mem.va);
161 goto free_virt;
162 }
163
164 buf->buf_mem.va = buf_mem.va;
165 buf->buf_mem.size = buf_mem.size;
166
167 return buf;
168
169free_virt:
170 kfree(buf_mem.va);
171 return NULL;
172}
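
/**
 * irdma_puda_dele_buf - unmap and free a puda buffer
 * @dev: hardware device
 * @buf: buffer to free
 */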
179static void irdma_puda_dele_buf(struct irdma_sc_dev *dev,
180 struct irdma_puda_buf *buf)
181{
182 dma_unmap_single(dev->hw->device, buf->mem.pa, buf->mem.size,
183 DMA_BIDIRECTIONAL);
184 kfree(buf->mem.va);
185 kfree(buf->buf_mem.va);
186}
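
/**
 * irdma_puda_get_next_send_wqe - return next wqe on the sq for processing
 * @qp: puda qp for wqe
 * @wqe_idx: wqe index for caller
 */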
193static __le64 *irdma_puda_get_next_send_wqe(struct irdma_qp_uk *qp,
194 u32 *wqe_idx)
195{
196 __le64 *wqe = NULL;
197 enum irdma_status_code ret_code = 0;
198
199 *wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
200 if (!*wqe_idx)
201 qp->swqe_polarity = !qp->swqe_polarity;
202 IRDMA_RING_MOVE_HEAD(qp->sq_ring, ret_code);
203 if (ret_code)
204 return wqe;
205
206 wqe = qp->sq_base[*wqe_idx].elem;
207
208 return wqe;
209}
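
/**
 * irdma_puda_poll_info - poll cq for a puda completion
 * @cq: cq for poll
 * @info: info returned for a successful completion
 */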
216static enum irdma_status_code
217irdma_puda_poll_info(struct irdma_sc_cq *cq, struct irdma_puda_cmpl_info *info)
218{
219 struct irdma_cq_uk *cq_uk = &cq->cq_uk;
220 u64 qword0, qword2, qword3, qword6;
221 __le64 *cqe;
222 __le64 *ext_cqe = NULL;
223 u64 qword7 = 0;
224 u64 comp_ctx;
225 bool valid_bit;
	bool ext_valid = false;
227 u32 major_err, minor_err;
228 u32 peek_head;
229 bool error;
230 u8 polarity;
231
232 cqe = IRDMA_GET_CURRENT_CQ_ELEM(&cq->cq_uk);
233 get_64bit_val(cqe, 24, &qword3);
234 valid_bit = (bool)FIELD_GET(IRDMA_CQ_VALID, qword3);
235 if (valid_bit != cq_uk->polarity)
236 return IRDMA_ERR_Q_EMPTY;
237
238 if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
239 ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
240
241 if (ext_valid) {
242 peek_head = (cq_uk->cq_ring.head + 1) % cq_uk->cq_ring.size;
243 ext_cqe = cq_uk->cq_base[peek_head].buf;
244 get_64bit_val(ext_cqe, 24, &qword7);
245 polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
246 if (!peek_head)
247 polarity ^= 1;
248 if (polarity != cq_uk->polarity)
249 return IRDMA_ERR_Q_EMPTY;
250
251 IRDMA_RING_MOVE_HEAD_NOCHECK(cq_uk->cq_ring);
252 if (!IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring))
253 cq_uk->polarity = !cq_uk->polarity;
254
255 IRDMA_RING_MOVE_TAIL(cq_uk->cq_ring);
256 }
257
258 print_hex_dump_debug("PUDA: PUDA CQE", DUMP_PREFIX_OFFSET, 16, 8, cqe,
259 32, false);
260 if (ext_valid)
261 print_hex_dump_debug("PUDA: PUDA EXT-CQE", DUMP_PREFIX_OFFSET,
262 16, 8, ext_cqe, 32, false);
263
264 error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
265 if (error) {
266 ibdev_dbg(to_ibdev(cq->dev), "PUDA: receive error\n");
267 major_err = (u32)(FIELD_GET(IRDMA_CQ_MAJERR, qword3));
268 minor_err = (u32)(FIELD_GET(IRDMA_CQ_MINERR, qword3));
269 info->compl_error = major_err << 16 | minor_err;
270 return IRDMA_ERR_CQ_COMPL_ERROR;
271 }
272
273 get_64bit_val(cqe, 0, &qword0);
274 get_64bit_val(cqe, 16, &qword2);
275
276 info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
277 info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
278 if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
279 info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
280
281 get_64bit_val(cqe, 8, &comp_ctx);
282 info->qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx;
283 info->wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
284
285 if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
286 if (ext_valid) {
287 info->vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7);
288 if (info->vlan_valid) {
289 get_64bit_val(ext_cqe, 16, &qword6);
290 info->vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6);
291 }
292 info->smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7);
293 if (info->smac_valid) {
294 get_64bit_val(ext_cqe, 16, &qword6);
295 info->smac[0] = (u8)((qword6 >> 40) & 0xFF);
296 info->smac[1] = (u8)((qword6 >> 32) & 0xFF);
297 info->smac[2] = (u8)((qword6 >> 24) & 0xFF);
298 info->smac[3] = (u8)((qword6 >> 16) & 0xFF);
299 info->smac[4] = (u8)((qword6 >> 8) & 0xFF);
300 info->smac[5] = (u8)(qword6 & 0xFF);
301 }
302 }
303
304 if (cq->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
305 info->vlan_valid = (bool)FIELD_GET(IRDMA_VLAN_TAG_VALID, qword3);
306 info->l4proto = (u8)FIELD_GET(IRDMA_UDA_L4PROTO, qword2);
307 info->l3proto = (u8)FIELD_GET(IRDMA_UDA_L3PROTO, qword2);
308 }
309
310 info->payload_len = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);
311 }
312
313 return 0;
314}
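
/**
 * irdma_puda_poll_cmpl - process a completion on an ILQ or IEQ cq
 * @dev: hardware device
 * @cq: cq getting interrupt
 * @compl_err: return any completion error
 */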
322enum irdma_status_code irdma_puda_poll_cmpl(struct irdma_sc_dev *dev,
323 struct irdma_sc_cq *cq,
324 u32 *compl_err)
325{
326 struct irdma_qp_uk *qp;
327 struct irdma_cq_uk *cq_uk = &cq->cq_uk;
328 struct irdma_puda_cmpl_info info = {};
329 enum irdma_status_code ret = 0;
330 struct irdma_puda_buf *buf;
331 struct irdma_puda_rsrc *rsrc;
332 u8 cq_type = cq->cq_type;
333 unsigned long flags;
334
335 if (cq_type == IRDMA_CQ_TYPE_ILQ || cq_type == IRDMA_CQ_TYPE_IEQ) {
336 rsrc = (cq_type == IRDMA_CQ_TYPE_ILQ) ? cq->vsi->ilq :
337 cq->vsi->ieq;
338 } else {
		ibdev_dbg(to_ibdev(dev), "PUDA: cq_type error\n");
340 return IRDMA_ERR_BAD_PTR;
341 }
342
343 ret = irdma_puda_poll_info(cq, &info);
344 *compl_err = info.compl_error;
345 if (ret == IRDMA_ERR_Q_EMPTY)
346 return ret;
347 if (ret)
348 goto done;
349
350 qp = info.qp;
351 if (!qp || !rsrc) {
352 ret = IRDMA_ERR_BAD_PTR;
353 goto done;
354 }
355
356 if (qp->qp_id != rsrc->qp_id) {
357 ret = IRDMA_ERR_BAD_PTR;
358 goto done;
359 }
360
361 if (info.q_type == IRDMA_CQE_QTYPE_RQ) {
362 buf = (struct irdma_puda_buf *)(uintptr_t)
363 qp->rq_wrid_array[info.wqe_idx];
364
365
366 dma_sync_single_for_cpu(dev->hw->device, buf->mem.pa,
367 buf->mem.size, DMA_BIDIRECTIONAL);
368
369 ret = irdma_puda_get_tcpip_info(&info, buf);
370 if (ret) {
371 rsrc->stats_rcvd_pkt_err++;
372 if (cq_type == IRDMA_CQ_TYPE_ILQ) {
373 irdma_ilq_putback_rcvbuf(&rsrc->qp, buf,
374 info.wqe_idx);
375 } else {
376 irdma_puda_ret_bufpool(rsrc, buf);
377 irdma_puda_replenish_rq(rsrc, false);
378 }
379 goto done;
380 }
381
382 rsrc->stats_pkt_rcvd++;
383 rsrc->compl_rxwqe_idx = info.wqe_idx;
384 ibdev_dbg(to_ibdev(dev), "PUDA: RQ completion\n");
385 rsrc->receive(rsrc->vsi, buf);
386 if (cq_type == IRDMA_CQ_TYPE_ILQ)
387 irdma_ilq_putback_rcvbuf(&rsrc->qp, buf, info.wqe_idx);
388 else
389 irdma_puda_replenish_rq(rsrc, false);
390
391 } else {
392 ibdev_dbg(to_ibdev(dev), "PUDA: SQ completion\n");
393 buf = (struct irdma_puda_buf *)(uintptr_t)
394 qp->sq_wrtrk_array[info.wqe_idx].wrid;
395
396
397 dma_sync_single_for_cpu(dev->hw->device, buf->mem.pa,
398 buf->mem.size, DMA_BIDIRECTIONAL);
399 IRDMA_RING_SET_TAIL(qp->sq_ring, info.wqe_idx);
400 rsrc->xmit_complete(rsrc->vsi, buf);
401 spin_lock_irqsave(&rsrc->bufpool_lock, flags);
402 rsrc->tx_wqe_avail_cnt++;
403 spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
404 if (!list_empty(&rsrc->txpend))
405 irdma_puda_send_buf(rsrc, NULL);
406 }
407
408done:
409 IRDMA_RING_MOVE_HEAD_NOCHECK(cq_uk->cq_ring);
410 if (!IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring))
411 cq_uk->polarity = !cq_uk->polarity;
412
413 IRDMA_RING_MOVE_TAIL(cq_uk->cq_ring);
414 set_64bit_val(cq_uk->shadow_area, 0,
415 IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring));
416
417 return ret;
418}
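
/**
 * irdma_puda_send - build and post a send wqe for transmit
 * @qp: puda qp for send
 * @info: buffer information for transmit
 */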
425enum irdma_status_code irdma_puda_send(struct irdma_sc_qp *qp,
426 struct irdma_puda_send_info *info)
427{
428 __le64 *wqe;
429 u32 iplen, l4len;
430 u64 hdr[2];
431 u32 wqe_idx;
432 u8 iipt;
433
434
435 l4len = info->tcplen >> 2;
436 if (info->ipv4) {
437 iipt = 3;
438 iplen = 5;
439 } else {
440 iipt = 1;
441 iplen = 10;
442 }
443
444 wqe = irdma_puda_get_next_send_wqe(&qp->qp_uk, &wqe_idx);
445 if (!wqe)
446 return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
447
448 qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid = (uintptr_t)info->scratch;
449
450
451
452 if (qp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
453 hdr[0] = 0;
454 hdr[1] = FIELD_PREP(IRDMA_UDA_QPSQ_OPCODE, IRDMA_OP_TYPE_SEND) |
455 FIELD_PREP(IRDMA_UDA_QPSQ_L4LEN, l4len) |
456 FIELD_PREP(IRDMAQPSQ_AHID, info->ah_id) |
457 FIELD_PREP(IRDMA_UDA_QPSQ_SIGCOMPL, 1) |
458 FIELD_PREP(IRDMA_UDA_QPSQ_VALID,
459 qp->qp_uk.swqe_polarity);
460
461
462
463 set_64bit_val(wqe, 0, info->paddr);
464 set_64bit_val(wqe, 8,
465 FIELD_PREP(IRDMAQPSQ_FRAG_LEN, info->len) |
466 FIELD_PREP(IRDMA_UDA_QPSQ_VALID, qp->qp_uk.swqe_polarity));
467 } else {
468 hdr[0] = FIELD_PREP(IRDMA_UDA_QPSQ_MACLEN, info->maclen >> 1) |
469 FIELD_PREP(IRDMA_UDA_QPSQ_IPLEN, iplen) |
470 FIELD_PREP(IRDMA_UDA_QPSQ_L4T, 1) |
471 FIELD_PREP(IRDMA_UDA_QPSQ_IIPT, iipt) |
472 FIELD_PREP(IRDMA_GEN1_UDA_QPSQ_L4LEN, l4len);
473
474 hdr[1] = FIELD_PREP(IRDMA_UDA_QPSQ_OPCODE, IRDMA_OP_TYPE_SEND) |
475 FIELD_PREP(IRDMA_UDA_QPSQ_SIGCOMPL, 1) |
476 FIELD_PREP(IRDMA_UDA_QPSQ_DOLOOPBACK, info->do_lpb) |
477 FIELD_PREP(IRDMA_UDA_QPSQ_VALID, qp->qp_uk.swqe_polarity);
478
479
480
481 set_64bit_val(wqe, 0, info->paddr);
482 set_64bit_val(wqe, 8,
483 FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, info->len));
484 }
485
486 set_64bit_val(wqe, 16, hdr[0]);
487 dma_wmb();
488
489 set_64bit_val(wqe, 24, hdr[1]);
490
491 print_hex_dump_debug("PUDA: PUDA SEND WQE", DUMP_PREFIX_OFFSET, 16, 8,
492 wqe, 32, false);
493 irdma_uk_qp_post_wr(&qp->qp_uk);
494 return 0;
495}
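
/**
 * irdma_puda_send_buf - transmit puda buffer or queue it on txpend
 * @rsrc: resource to use for buffer
 * @buf: puda buffer to transmit (NULL means send the next pending buffer)
 */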
502void irdma_puda_send_buf(struct irdma_puda_rsrc *rsrc,
503 struct irdma_puda_buf *buf)
504{
505 struct irdma_puda_send_info info;
506 enum irdma_status_code ret = 0;
507 unsigned long flags;
508
509 spin_lock_irqsave(&rsrc->bufpool_lock, flags);
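
	/* Queue the buffer if no SQ WQE is available, or if this is a new
	 * buffer while others are already pending on txpend.
	 */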
513 if (!rsrc->tx_wqe_avail_cnt || (buf && !list_empty(&rsrc->txpend))) {
514 list_add_tail(&buf->list, &rsrc->txpend);
515 spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
516 rsrc->stats_sent_pkt_q++;
517 if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
518 ibdev_dbg(to_ibdev(rsrc->dev),
519 "PUDA: adding to txpend\n");
520 return;
521 }
522 rsrc->tx_wqe_avail_cnt--;
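
	/* A NULL buf means a send just completed; pull the next pending
	 * buffer off the txpend list, if any.
	 */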
526 if (!buf) {
527 buf = irdma_puda_get_listbuf(&rsrc->txpend);
528 if (!buf)
529 goto done;
530 }
531
532 info.scratch = buf;
533 info.paddr = buf->mem.pa;
534 info.len = buf->totallen;
535 info.tcplen = buf->tcphlen;
536 info.ipv4 = buf->ipv4;
537
538 if (rsrc->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
539 info.ah_id = buf->ah_id;
540 } else {
541 info.maclen = buf->maclen;
542 info.do_lpb = buf->do_lpb;
543 }
544
545
546 dma_sync_single_for_cpu(rsrc->dev->hw->device, buf->mem.pa,
547 buf->mem.size, DMA_BIDIRECTIONAL);
548 ret = irdma_puda_send(&rsrc->qp, &info);
549 if (ret) {
550 rsrc->tx_wqe_avail_cnt++;
551 rsrc->stats_sent_pkt_q++;
552 list_add(&buf->list, &rsrc->txpend);
553 if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
554 ibdev_dbg(to_ibdev(rsrc->dev),
				  "PUDA: send failed, adding back to txpend\n");
556 } else {
557 rsrc->stats_pkt_sent++;
558 }
559done:
560 spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
561}
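
/**
 * irdma_puda_qp_setctx - set the puda qp's context during init
 * @rsrc: qp's resource
 */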
567static void irdma_puda_qp_setctx(struct irdma_puda_rsrc *rsrc)
568{
569 struct irdma_sc_qp *qp = &rsrc->qp;
570 __le64 *qp_ctx = qp->hw_host_ctx;
571
572 set_64bit_val(qp_ctx, 8, qp->sq_pa);
573 set_64bit_val(qp_ctx, 16, qp->rq_pa);
574 set_64bit_val(qp_ctx, 24,
575 FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
576 FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size));
577 set_64bit_val(qp_ctx, 48,
578 FIELD_PREP(IRDMAQPC_SNDMSS, rsrc->buf_size));
579 set_64bit_val(qp_ctx, 56, 0);
580 if (qp->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
581 set_64bit_val(qp_ctx, 64, 1);
582 set_64bit_val(qp_ctx, 136,
583 FIELD_PREP(IRDMAQPC_TXCQNUM, rsrc->cq_id) |
584 FIELD_PREP(IRDMAQPC_RXCQNUM, rsrc->cq_id));
585 set_64bit_val(qp_ctx, 144,
586 FIELD_PREP(IRDMAQPC_STAT_INDEX, rsrc->stats_idx));
587 set_64bit_val(qp_ctx, 160,
588 FIELD_PREP(IRDMAQPC_PRIVEN, 1) |
589 FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, rsrc->stats_idx_valid));
590 set_64bit_val(qp_ctx, 168,
591 FIELD_PREP(IRDMAQPC_QPCOMPCTX, (uintptr_t)qp));
592 set_64bit_val(qp_ctx, 176,
593 FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
594 FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
595 FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle));
596
597 print_hex_dump_debug("PUDA: PUDA QP CONTEXT", DUMP_PREFIX_OFFSET, 16,
598 8, qp_ctx, IRDMA_QP_CTX_SIZE, false);
599}
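
/**
 * irdma_puda_qp_wqe - setup CQP wqe for puda qp create
 * @dev: hardware device
 * @qp: resource qp
 */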
606static enum irdma_status_code irdma_puda_qp_wqe(struct irdma_sc_dev *dev,
607 struct irdma_sc_qp *qp)
608{
609 struct irdma_sc_cqp *cqp;
610 __le64 *wqe;
611 u64 hdr;
612 struct irdma_ccq_cqe_info compl_info;
613 enum irdma_status_code status = 0;
614
615 cqp = dev->cqp;
616 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, 0);
617 if (!wqe)
618 return IRDMA_ERR_RING_FULL;
619
620 set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
621 set_64bit_val(wqe, 40, qp->shadow_area_pa);
622
623 hdr = qp->qp_uk.qp_id |
624 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_QP) |
625 FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, IRDMA_QP_TYPE_UDA) |
626 FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, 1) |
627 FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, 2) |
628 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
629 dma_wmb();
630
631 set_64bit_val(wqe, 24, hdr);
632
633 print_hex_dump_debug("PUDA: PUDA QP CREATE", DUMP_PREFIX_OFFSET, 16,
634 8, wqe, 40, false);
635 irdma_sc_cqp_post_sq(cqp);
636 status = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_CREATE_QP,
637 &compl_info);
638
639 return status;
640}
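
/**
 * irdma_puda_qp_create - create qp for resource
 * @rsrc: resource to use for qp
 */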
646static enum irdma_status_code irdma_puda_qp_create(struct irdma_puda_rsrc *rsrc)
647{
648 struct irdma_sc_qp *qp = &rsrc->qp;
649 struct irdma_qp_uk *ukqp = &qp->qp_uk;
650 enum irdma_status_code ret = 0;
651 u32 sq_size, rq_size;
652 struct irdma_dma_mem *mem;
653
654 sq_size = rsrc->sq_size * IRDMA_QP_WQE_MIN_SIZE;
655 rq_size = rsrc->rq_size * IRDMA_QP_WQE_MIN_SIZE;
656 rsrc->qpmem.size = ALIGN((sq_size + rq_size + (IRDMA_SHADOW_AREA_SIZE << 3) + IRDMA_QP_CTX_SIZE),
657 IRDMA_HW_PAGE_SIZE);
658 rsrc->qpmem.va = dma_alloc_coherent(rsrc->dev->hw->device,
659 rsrc->qpmem.size, &rsrc->qpmem.pa,
660 GFP_KERNEL);
661 if (!rsrc->qpmem.va)
662 return IRDMA_ERR_NO_MEMORY;
663
664 mem = &rsrc->qpmem;
665 memset(mem->va, 0, rsrc->qpmem.size);
666 qp->hw_sq_size = irdma_get_encoded_wqe_size(rsrc->sq_size, IRDMA_QUEUE_TYPE_SQ_RQ);
667 qp->hw_rq_size = irdma_get_encoded_wqe_size(rsrc->rq_size, IRDMA_QUEUE_TYPE_SQ_RQ);
668 qp->pd = &rsrc->sc_pd;
669 qp->qp_uk.qp_type = IRDMA_QP_TYPE_UDA;
670 qp->dev = rsrc->dev;
671 qp->qp_uk.back_qp = rsrc;
672 qp->sq_pa = mem->pa;
673 qp->rq_pa = qp->sq_pa + sq_size;
674 qp->vsi = rsrc->vsi;
675 ukqp->sq_base = mem->va;
676 ukqp->rq_base = &ukqp->sq_base[rsrc->sq_size];
677 ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem;
678 ukqp->uk_attrs = &qp->dev->hw_attrs.uk_attrs;
679 qp->shadow_area_pa = qp->rq_pa + rq_size;
680 qp->hw_host_ctx = ukqp->shadow_area + IRDMA_SHADOW_AREA_SIZE;
681 qp->hw_host_ctx_pa = qp->shadow_area_pa + (IRDMA_SHADOW_AREA_SIZE << 3);
682 qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
683 ukqp->qp_id = rsrc->qp_id;
684 ukqp->sq_wrtrk_array = rsrc->sq_wrtrk_array;
685 ukqp->rq_wrid_array = rsrc->rq_wrid_array;
686 ukqp->sq_size = rsrc->sq_size;
687 ukqp->rq_size = rsrc->rq_size;
688
689 IRDMA_RING_INIT(ukqp->sq_ring, ukqp->sq_size);
690 IRDMA_RING_INIT(ukqp->initial_ring, ukqp->sq_size);
691 IRDMA_RING_INIT(ukqp->rq_ring, ukqp->rq_size);
692 ukqp->wqe_alloc_db = qp->pd->dev->wqe_alloc_db;
693
694 ret = rsrc->dev->ws_add(qp->vsi, qp->user_pri);
695 if (ret) {
696 dma_free_coherent(rsrc->dev->hw->device, rsrc->qpmem.size,
697 rsrc->qpmem.va, rsrc->qpmem.pa);
698 rsrc->qpmem.va = NULL;
699 return ret;
700 }
701
702 irdma_qp_add_qos(qp);
703 irdma_puda_qp_setctx(rsrc);
704
705 if (rsrc->dev->ceq_valid)
706 ret = irdma_cqp_qp_create_cmd(rsrc->dev, qp);
707 else
708 ret = irdma_puda_qp_wqe(rsrc->dev, qp);
709 if (ret) {
710 irdma_qp_rem_qos(qp);
711 rsrc->dev->ws_remove(qp->vsi, qp->user_pri);
712 dma_free_coherent(rsrc->dev->hw->device, rsrc->qpmem.size,
713 rsrc->qpmem.va, rsrc->qpmem.pa);
714 rsrc->qpmem.va = NULL;
715 }
716
717 return ret;
718}
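
/**
 * irdma_puda_cq_wqe - setup CQP wqe for puda cq create
 * @dev: hardware device
 * @cq: resource cq
 */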
725static enum irdma_status_code irdma_puda_cq_wqe(struct irdma_sc_dev *dev,
726 struct irdma_sc_cq *cq)
727{
728 __le64 *wqe;
729 struct irdma_sc_cqp *cqp;
730 u64 hdr;
731 struct irdma_ccq_cqe_info compl_info;
732 enum irdma_status_code status = 0;
733
734 cqp = dev->cqp;
735 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, 0);
736 if (!wqe)
737 return IRDMA_ERR_RING_FULL;
738
739 set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
740 set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
741 set_64bit_val(wqe, 16,
742 FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, cq->shadow_read_threshold));
743 set_64bit_val(wqe, 32, cq->cq_pa);
744 set_64bit_val(wqe, 40, cq->shadow_area_pa);
745 set_64bit_val(wqe, 56,
746 FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) |
747 FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx));
748
749 hdr = cq->cq_uk.cq_id |
750 FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CQ) |
751 FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, 1) |
752 FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, 1) |
753 FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, 1) |
754 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
755 dma_wmb();
756
757 set_64bit_val(wqe, 24, hdr);
758
759 print_hex_dump_debug("PUDA: PUDA CREATE CQ", DUMP_PREFIX_OFFSET, 16,
760 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
761 irdma_sc_cqp_post_sq(dev->cqp);
762 status = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_CREATE_CQ,
763 &compl_info);
764 if (!status) {
765 struct irdma_sc_ceq *ceq = dev->ceq[0];
766
767 if (ceq && ceq->reg_cq)
768 status = irdma_sc_add_cq_ctx(ceq, cq);
769 }
770
771 return status;
772}
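
/**
 * irdma_puda_cq_create - create cq for resource
 * @rsrc: resource for which to create the cq
 */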
778static enum irdma_status_code irdma_puda_cq_create(struct irdma_puda_rsrc *rsrc)
779{
780 struct irdma_sc_dev *dev = rsrc->dev;
781 struct irdma_sc_cq *cq = &rsrc->cq;
782 enum irdma_status_code ret = 0;
783 u32 cqsize;
784 struct irdma_dma_mem *mem;
785 struct irdma_cq_init_info info = {};
786 struct irdma_cq_uk_init_info *init_info = &info.cq_uk_init_info;
787
788 cq->vsi = rsrc->vsi;
789 cqsize = rsrc->cq_size * (sizeof(struct irdma_cqe));
790 rsrc->cqmem.size = ALIGN(cqsize + sizeof(struct irdma_cq_shadow_area),
791 IRDMA_CQ0_ALIGNMENT);
792 rsrc->cqmem.va = dma_alloc_coherent(dev->hw->device, rsrc->cqmem.size,
793 &rsrc->cqmem.pa, GFP_KERNEL);
794 if (!rsrc->cqmem.va)
795 return IRDMA_ERR_NO_MEMORY;
796
797 mem = &rsrc->cqmem;
798 info.dev = dev;
799 info.type = (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ) ?
800 IRDMA_CQ_TYPE_ILQ : IRDMA_CQ_TYPE_IEQ;
801 info.shadow_read_threshold = rsrc->cq_size >> 2;
802 info.cq_base_pa = mem->pa;
803 info.shadow_area_pa = mem->pa + cqsize;
804 init_info->cq_base = mem->va;
805 init_info->shadow_area = (__le64 *)((u8 *)mem->va + cqsize);
806 init_info->cq_size = rsrc->cq_size;
807 init_info->cq_id = rsrc->cq_id;
808 info.ceqe_mask = true;
809 info.ceq_id_valid = true;
810 info.vsi = rsrc->vsi;
811
812 ret = irdma_sc_cq_init(cq, &info);
813 if (ret)
814 goto error;
815
816 if (rsrc->dev->ceq_valid)
817 ret = irdma_cqp_cq_create_cmd(dev, cq);
818 else
819 ret = irdma_puda_cq_wqe(dev, cq);
820error:
821 if (ret) {
822 dma_free_coherent(dev->hw->device, rsrc->cqmem.size,
823 rsrc->cqmem.va, rsrc->cqmem.pa);
824 rsrc->cqmem.va = NULL;
825 }
826
827 return ret;
828}
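
/**
 * irdma_puda_free_qp - free qp for resource
 * @rsrc: resource for which to free the qp
 */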
834static void irdma_puda_free_qp(struct irdma_puda_rsrc *rsrc)
835{
836 enum irdma_status_code ret;
837 struct irdma_ccq_cqe_info compl_info;
838 struct irdma_sc_dev *dev = rsrc->dev;
839
840 if (rsrc->dev->ceq_valid) {
841 irdma_cqp_qp_destroy_cmd(dev, &rsrc->qp);
842 rsrc->dev->ws_remove(rsrc->qp.vsi, rsrc->qp.user_pri);
843 return;
844 }
845
846 ret = irdma_sc_qp_destroy(&rsrc->qp, 0, false, true, true);
847 if (ret)
848 ibdev_dbg(to_ibdev(dev),
849 "PUDA: error puda qp destroy wqe, status = %d\n",
850 ret);
851 if (!ret) {
852 ret = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_DESTROY_QP,
853 &compl_info);
854 if (ret)
855 ibdev_dbg(to_ibdev(dev),
856 "PUDA: error puda qp destroy failed, status = %d\n",
857 ret);
858 }
859 rsrc->dev->ws_remove(rsrc->qp.vsi, rsrc->qp.user_pri);
860}
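
/**
 * irdma_puda_free_cq - free cq for resource
 * @rsrc: resource for which to free the cq
 */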
866static void irdma_puda_free_cq(struct irdma_puda_rsrc *rsrc)
867{
868 enum irdma_status_code ret;
869 struct irdma_ccq_cqe_info compl_info;
870 struct irdma_sc_dev *dev = rsrc->dev;
871
872 if (rsrc->dev->ceq_valid) {
873 irdma_cqp_cq_destroy_cmd(dev, &rsrc->cq);
874 return;
875 }
876
877 ret = irdma_sc_cq_destroy(&rsrc->cq, 0, true);
878 if (ret)
879 ibdev_dbg(to_ibdev(dev), "PUDA: error ieq cq destroy\n");
880 if (!ret) {
881 ret = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_DESTROY_CQ,
882 &compl_info);
883 if (ret)
884 ibdev_dbg(to_ibdev(dev),
				  "PUDA: error ieq cq destroy done\n");
886 }
887}
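
/**
 * irdma_puda_dele_rsrc - delete all resources during close
 * @vsi: VSI structure of device
 * @type: type of resource to delete
 * @reset: true if reset of chip
 */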
895void irdma_puda_dele_rsrc(struct irdma_sc_vsi *vsi, enum puda_rsrc_type type,
896 bool reset)
897{
898 struct irdma_sc_dev *dev = vsi->dev;
899 struct irdma_puda_rsrc *rsrc;
900 struct irdma_puda_buf *buf = NULL;
901 struct irdma_puda_buf *nextbuf = NULL;
902 struct irdma_virt_mem *vmem;
903 struct irdma_sc_ceq *ceq;
904
905 ceq = vsi->dev->ceq[0];
906 switch (type) {
907 case IRDMA_PUDA_RSRC_TYPE_ILQ:
908 rsrc = vsi->ilq;
909 vmem = &vsi->ilq_mem;
910 vsi->ilq = NULL;
911 if (ceq && ceq->reg_cq)
912 irdma_sc_remove_cq_ctx(ceq, &rsrc->cq);
913 break;
914 case IRDMA_PUDA_RSRC_TYPE_IEQ:
915 rsrc = vsi->ieq;
916 vmem = &vsi->ieq_mem;
917 vsi->ieq = NULL;
918 if (ceq && ceq->reg_cq)
919 irdma_sc_remove_cq_ctx(ceq, &rsrc->cq);
920 break;
921 default:
922 ibdev_dbg(to_ibdev(dev), "PUDA: error resource type = 0x%x\n",
923 type);
924 return;
925 }
926
927 switch (rsrc->cmpl) {
928 case PUDA_HASH_CRC_COMPLETE:
929 irdma_free_hash_desc(rsrc->hash_desc);
930 fallthrough;
931 case PUDA_QP_CREATED:
932 irdma_qp_rem_qos(&rsrc->qp);
933
934 if (!reset)
935 irdma_puda_free_qp(rsrc);
936
937 dma_free_coherent(dev->hw->device, rsrc->qpmem.size,
938 rsrc->qpmem.va, rsrc->qpmem.pa);
939 rsrc->qpmem.va = NULL;
940 fallthrough;
941 case PUDA_CQ_CREATED:
942 if (!reset)
943 irdma_puda_free_cq(rsrc);
944
945 dma_free_coherent(dev->hw->device, rsrc->cqmem.size,
946 rsrc->cqmem.va, rsrc->cqmem.pa);
947 rsrc->cqmem.va = NULL;
948 break;
949 default:
950 ibdev_dbg(to_ibdev(rsrc->dev), "PUDA: error no resources\n");
951 break;
952 }
953
954 buf = rsrc->alloclist;
955 while (buf) {
956 nextbuf = buf->next;
957 irdma_puda_dele_buf(dev, buf);
958 buf = nextbuf;
959 rsrc->alloc_buf_count--;
960 }
961
962 kfree(vmem->va);
963}
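
/**
 * irdma_puda_allocbufs - allocate buffers for resource
 * @rsrc: resource for buffer allocation
 * @count: number of buffers to create
 */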
970static enum irdma_status_code irdma_puda_allocbufs(struct irdma_puda_rsrc *rsrc,
971 u32 count)
972{
973 u32 i;
974 struct irdma_puda_buf *buf;
975 struct irdma_puda_buf *nextbuf;
976
977 for (i = 0; i < count; i++) {
978 buf = irdma_puda_alloc_buf(rsrc->dev, rsrc->buf_size);
979 if (!buf) {
980 rsrc->stats_buf_alloc_fail++;
981 return IRDMA_ERR_NO_MEMORY;
982 }
983 irdma_puda_ret_bufpool(rsrc, buf);
984 rsrc->alloc_buf_count++;
985 if (!rsrc->alloclist) {
986 rsrc->alloclist = buf;
987 } else {
988 nextbuf = rsrc->alloclist;
989 rsrc->alloclist = buf;
990 buf->next = nextbuf;
991 }
992 }
993
994 rsrc->avail_buf_count = rsrc->alloc_buf_count;
995
996 return 0;
997}
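
/**
 * irdma_puda_create_rsrc - create resource (ILQ or IEQ)
 * @vsi: sc VSI struct
 * @info: resource information
 */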
1004enum irdma_status_code irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi,
1005 struct irdma_puda_rsrc_info *info)
1006{
1007 struct irdma_sc_dev *dev = vsi->dev;
1008 enum irdma_status_code ret = 0;
1009 struct irdma_puda_rsrc *rsrc;
1010 u32 pudasize;
1011 u32 sqwridsize, rqwridsize;
1012 struct irdma_virt_mem *vmem;
1013
1014 info->count = 1;
1015 pudasize = sizeof(struct irdma_puda_rsrc);
1016 sqwridsize = info->sq_size * sizeof(struct irdma_sq_uk_wr_trk_info);
1017 rqwridsize = info->rq_size * 8;
1018 switch (info->type) {
1019 case IRDMA_PUDA_RSRC_TYPE_ILQ:
1020 vmem = &vsi->ilq_mem;
1021 break;
1022 case IRDMA_PUDA_RSRC_TYPE_IEQ:
1023 vmem = &vsi->ieq_mem;
1024 break;
1025 default:
1026 return IRDMA_NOT_SUPPORTED;
1027 }
1028 vmem->size = pudasize + sqwridsize + rqwridsize;
1029 vmem->va = kzalloc(vmem->size, GFP_KERNEL);
1030 if (!vmem->va)
1031 return IRDMA_ERR_NO_MEMORY;
1032
1033 rsrc = vmem->va;
1034 spin_lock_init(&rsrc->bufpool_lock);
1035 switch (info->type) {
1036 case IRDMA_PUDA_RSRC_TYPE_ILQ:
1037 vsi->ilq = vmem->va;
1038 vsi->ilq_count = info->count;
1039 rsrc->receive = info->receive;
1040 rsrc->xmit_complete = info->xmit_complete;
1041 break;
1042 case IRDMA_PUDA_RSRC_TYPE_IEQ:
1043 vsi->ieq_count = info->count;
1044 vsi->ieq = vmem->va;
1045 rsrc->receive = irdma_ieq_receive;
1046 rsrc->xmit_complete = irdma_ieq_tx_compl;
1047 break;
1048 default:
1049 return IRDMA_NOT_SUPPORTED;
1050 }
1051
1052 rsrc->type = info->type;
1053 rsrc->sq_wrtrk_array = (struct irdma_sq_uk_wr_trk_info *)
1054 ((u8 *)vmem->va + pudasize);
1055 rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize);
1056
1057 INIT_LIST_HEAD(&rsrc->bufpool);
1058 INIT_LIST_HEAD(&rsrc->txpend);
1059
1060 rsrc->tx_wqe_avail_cnt = info->sq_size - 1;
1061 irdma_sc_pd_init(dev, &rsrc->sc_pd, info->pd_id, info->abi_ver);
1062 rsrc->qp_id = info->qp_id;
1063 rsrc->cq_id = info->cq_id;
1064 rsrc->sq_size = info->sq_size;
1065 rsrc->rq_size = info->rq_size;
1066 rsrc->cq_size = info->rq_size + info->sq_size;
1067 if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1068 if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
1069 rsrc->cq_size += info->rq_size;
1070 }
1071 rsrc->buf_size = info->buf_size;
1072 rsrc->dev = dev;
1073 rsrc->vsi = vsi;
1074 rsrc->stats_idx = info->stats_idx;
1075 rsrc->stats_idx_valid = info->stats_idx_valid;
1076
1077 ret = irdma_puda_cq_create(rsrc);
1078 if (!ret) {
1079 rsrc->cmpl = PUDA_CQ_CREATED;
1080 ret = irdma_puda_qp_create(rsrc);
1081 }
1082 if (ret) {
1083 ibdev_dbg(to_ibdev(dev),
1084 "PUDA: error qp_create type=%d, status=%d\n",
1085 rsrc->type, ret);
1086 goto error;
1087 }
1088 rsrc->cmpl = PUDA_QP_CREATED;
1089
1090 ret = irdma_puda_allocbufs(rsrc, info->tx_buf_cnt + info->rq_size);
1091 if (ret) {
1092 ibdev_dbg(to_ibdev(dev), "PUDA: error alloc_buf\n");
1093 goto error;
1094 }
1095
1096 rsrc->rxq_invalid_cnt = info->rq_size;
1097 ret = irdma_puda_replenish_rq(rsrc, true);
1098 if (ret)
1099 goto error;
1100
1101 if (info->type == IRDMA_PUDA_RSRC_TYPE_IEQ) {
1102 if (!irdma_init_hash_desc(&rsrc->hash_desc)) {
1103 rsrc->check_crc = true;
1104 rsrc->cmpl = PUDA_HASH_CRC_COMPLETE;
1105 ret = 0;
1106 }
1107 }
1108
1109 irdma_sc_ccq_arm(&rsrc->cq);
1110 return ret;
1111
1112error:
1113 irdma_puda_dele_rsrc(vsi, info->type, false);
1114
1115 return ret;
1116}
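
/**
 * irdma_ilq_putback_rcvbuf - put an ilq buffer back on the rq
 * @qp: ilq's qp resource
 * @buf: puda receive buffer
 * @wqe_idx: wqe index of completed rcvbuf
 */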
1124static void irdma_ilq_putback_rcvbuf(struct irdma_sc_qp *qp,
1125 struct irdma_puda_buf *buf, u32 wqe_idx)
1126{
1127 __le64 *wqe;
1128 u64 offset8, offset24;
1129
1130
1131 dma_sync_single_for_device(qp->dev->hw->device, buf->mem.pa,
1132 buf->mem.size, DMA_BIDIRECTIONAL);
1133 wqe = qp->qp_uk.rq_base[wqe_idx].elem;
1134 get_64bit_val(wqe, 24, &offset24);
1135 if (qp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1136 get_64bit_val(wqe, 8, &offset8);
1137 if (offset24)
1138 offset8 &= ~FIELD_PREP(IRDMAQPSQ_VALID, 1);
1139 else
1140 offset8 |= FIELD_PREP(IRDMAQPSQ_VALID, 1);
1141 set_64bit_val(wqe, 8, offset8);
1142 dma_wmb();
1143 }
1144 if (offset24)
1145 offset24 = 0;
1146 else
1147 offset24 = FIELD_PREP(IRDMAQPSQ_VALID, 1);
1148
1149 set_64bit_val(wqe, 24, offset24);
1150}
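
/**
 * irdma_ieq_get_fpdu_len - get length of fpdu with or without markers
 * @pfpdu: pointer to fpdu
 * @datap: pointer to data in the buffer
 * @rcv_seq: seqnum of the data buffer
 */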
1158static u16 irdma_ieq_get_fpdu_len(struct irdma_pfpdu *pfpdu, u8 *datap,
1159 u32 rcv_seq)
1160{
1161 u32 marker_seq, end_seq, blk_start;
1162 u8 marker_len = pfpdu->marker_len;
1163 u16 total_len = 0;
1164 u16 fpdu_len;
1165
1166 blk_start = (pfpdu->rcv_start_seq - rcv_seq) & (IRDMA_MRK_BLK_SZ - 1);
1167 if (!blk_start) {
1168 total_len = marker_len;
1169 marker_seq = rcv_seq + IRDMA_MRK_BLK_SZ;
1170 if (marker_len && *(u32 *)datap)
1171 return 0;
1172 } else {
1173 marker_seq = rcv_seq + blk_start;
1174 }
1175
1176 datap += total_len;
1177 fpdu_len = ntohs(*(__be16 *)datap);
1178 fpdu_len += IRDMA_IEQ_MPA_FRAMING;
1179 fpdu_len = (fpdu_len + 3) & 0xfffc;
1180
1181 if (fpdu_len > pfpdu->max_fpdu_data)
1182 return 0;
1183
1184 total_len += fpdu_len;
1185 end_seq = rcv_seq + total_len;
1186 while ((int)(marker_seq - end_seq) < 0) {
1187 total_len += marker_len;
1188 end_seq += marker_len;
1189 marker_seq += IRDMA_MRK_BLK_SZ;
1190 }
1191
1192 return total_len;
1193}
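
/**
 * irdma_ieq_copy_to_txbuf - copy data from rcv buf to tx buf
 * @buf: rcv buffer with partial
 * @txbuf: tx buffer for sending back
 * @buf_offset: rcv buffer offset to copy from
 * @txbuf_offset: offset in tx buf to copy data to
 * @len: length of data to copy
 */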
1203static void irdma_ieq_copy_to_txbuf(struct irdma_puda_buf *buf,
1204 struct irdma_puda_buf *txbuf,
1205 u16 buf_offset, u32 txbuf_offset, u32 len)
1206{
1207 void *mem1 = (u8 *)buf->mem.va + buf_offset;
1208 void *mem2 = (u8 *)txbuf->mem.va + txbuf_offset;
1209
1210 memcpy(mem2, mem1, len);
1211}
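
/**
 * irdma_ieq_setup_tx_buf - setup tx buffer headers from the rcv buffer
 * @buf: puda buffer to send back
 * @txbuf: buffer to prepare
 */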
1218static void irdma_ieq_setup_tx_buf(struct irdma_puda_buf *buf,
1219 struct irdma_puda_buf *txbuf)
1220{
1221 txbuf->tcphlen = buf->tcphlen;
1222 txbuf->ipv4 = buf->ipv4;
1223
1224 if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1225 txbuf->hdrlen = txbuf->tcphlen;
1226 irdma_ieq_copy_to_txbuf(buf, txbuf, IRDMA_TCP_OFFSET, 0,
1227 txbuf->hdrlen);
1228 } else {
1229 txbuf->maclen = buf->maclen;
1230 txbuf->hdrlen = buf->hdrlen;
1231 irdma_ieq_copy_to_txbuf(buf, txbuf, 0, 0, buf->hdrlen);
1232 }
1233}
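
/**
 * irdma_ieq_check_first_buf - trim buffer to start at the first fpdu seqnum
 * @buf: receive buffer
 * @fps: first fpdu seqnum
 */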
1240static void irdma_ieq_check_first_buf(struct irdma_puda_buf *buf, u32 fps)
1241{
1242 u32 offset;
1243
1244 if (buf->seqnum < fps) {
1245 offset = fps - buf->seqnum;
1246 if (offset > buf->datalen)
1247 return;
1248 buf->data += offset;
1249 buf->datalen -= (u16)offset;
1250 buf->seqnum = fps;
1251 }
1252}
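
/**
 * irdma_ieq_compl_pfpdu - write txbuf with a complete fpdu
 * @ieq: ieq resource
 * @rxlist: ieq's receive buffer list
 * @pbufl: partial list of buffers
 * @txbuf: tx buffer for fpdu
 * @fpdu_len: total length of fpdu
 */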
1262static void irdma_ieq_compl_pfpdu(struct irdma_puda_rsrc *ieq,
1263 struct list_head *rxlist,
1264 struct list_head *pbufl,
1265 struct irdma_puda_buf *txbuf, u16 fpdu_len)
1266{
1267 struct irdma_puda_buf *buf;
1268 u32 nextseqnum;
1269 u16 txoffset, bufoffset;
1270
1271 buf = irdma_puda_get_listbuf(pbufl);
1272 if (!buf)
1273 return;
1274
1275 nextseqnum = buf->seqnum + fpdu_len;
1276 irdma_ieq_setup_tx_buf(buf, txbuf);
1277 if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1278 txoffset = txbuf->hdrlen;
1279 txbuf->totallen = txbuf->hdrlen + fpdu_len;
1280 txbuf->data = (u8 *)txbuf->mem.va + txoffset;
1281 } else {
1282 txoffset = buf->hdrlen;
1283 txbuf->totallen = buf->hdrlen + fpdu_len;
1284 txbuf->data = (u8 *)txbuf->mem.va + buf->hdrlen;
1285 }
1286 bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
1287
1288 do {
1289 if (buf->datalen >= fpdu_len) {
1290
1291 irdma_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset,
1292 fpdu_len);
1293 buf->datalen -= fpdu_len;
1294 buf->data += fpdu_len;
1295 buf->seqnum = nextseqnum;
1296 break;
1297 }
1298
1299 irdma_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset,
1300 buf->datalen);
1301 txoffset += buf->datalen;
1302 fpdu_len -= buf->datalen;
1303 irdma_puda_ret_bufpool(ieq, buf);
1304 buf = irdma_puda_get_listbuf(pbufl);
1305 if (!buf)
1306 return;
1307
1308 bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
1309 } while (1);
1310
1311
1312 if (buf->datalen)
1313 list_add(&buf->list, rxlist);
1314 else
1315 irdma_puda_ret_bufpool(ieq, buf);
1316}
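
/**
 * irdma_ieq_create_pbufl - create buffer list for a single fpdu
 * @pfpdu: pointer to fpdu
 * @rxlist: resource list of received ieq buffers
 * @pbufl: temporary list of buffers for the fpdu
 * @buf: rcv buffer for the fpdu
 * @fpdu_len: total length of fpdu
 */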
1326static enum irdma_status_code
1327irdma_ieq_create_pbufl(struct irdma_pfpdu *pfpdu, struct list_head *rxlist,
1328 struct list_head *pbufl, struct irdma_puda_buf *buf,
1329 u16 fpdu_len)
1330{
1331 enum irdma_status_code status = 0;
1332 struct irdma_puda_buf *nextbuf;
1333 u32 nextseqnum;
1334 u16 plen = fpdu_len - buf->datalen;
1335 bool done = false;
1336
1337 nextseqnum = buf->seqnum + buf->datalen;
1338 do {
1339 nextbuf = irdma_puda_get_listbuf(rxlist);
1340 if (!nextbuf) {
1341 status = IRDMA_ERR_list_empty;
1342 break;
1343 }
1344 list_add_tail(&nextbuf->list, pbufl);
1345 if (nextbuf->seqnum != nextseqnum) {
1346 pfpdu->bad_seq_num++;
1347 status = IRDMA_ERR_SEQ_NUM;
1348 break;
1349 }
1350 if (nextbuf->datalen >= plen) {
1351 done = true;
1352 } else {
1353 plen -= nextbuf->datalen;
1354 nextseqnum = nextbuf->seqnum + nextbuf->datalen;
1355 }
1356
1357 } while (!done);
1358
1359 return status;
1360}
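
/**
 * irdma_ieq_handle_partial - process a partial fpdu buffer
 * @ieq: ieq resource
 * @pfpdu: partial management per user qp
 * @buf: receive buffer
 * @fpdu_len: fpdu len in the buffer
 */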
1369static enum irdma_status_code
1370irdma_ieq_handle_partial(struct irdma_puda_rsrc *ieq, struct irdma_pfpdu *pfpdu,
1371 struct irdma_puda_buf *buf, u16 fpdu_len)
1372{
1373 enum irdma_status_code status = 0;
1374 u8 *crcptr;
1375 u32 mpacrc;
1376 u32 seqnum = buf->seqnum;
1377 struct list_head pbufl;
1378 struct irdma_puda_buf *txbuf = NULL;
1379 struct list_head *rxlist = &pfpdu->rxlist;
1380
1381 ieq->partials_handled++;
1382
1383 INIT_LIST_HEAD(&pbufl);
1384 list_add(&buf->list, &pbufl);
1385
1386 status = irdma_ieq_create_pbufl(pfpdu, rxlist, &pbufl, buf, fpdu_len);
1387 if (status)
1388 goto error;
1389
1390 txbuf = irdma_puda_get_bufpool(ieq);
1391 if (!txbuf) {
1392 pfpdu->no_tx_bufs++;
1393 status = IRDMA_ERR_NO_TXBUFS;
1394 goto error;
1395 }
1396
1397 irdma_ieq_compl_pfpdu(ieq, rxlist, &pbufl, txbuf, fpdu_len);
1398 irdma_ieq_update_tcpip_info(txbuf, fpdu_len, seqnum);
1399
1400 crcptr = txbuf->data + fpdu_len - 4;
1401 mpacrc = *(u32 *)crcptr;
1402 if (ieq->check_crc) {
1403 status = irdma_ieq_check_mpacrc(ieq->hash_desc, txbuf->data,
1404 (fpdu_len - 4), mpacrc);
1405 if (status) {
1406 ibdev_dbg(to_ibdev(ieq->dev), "IEQ: error bad crc\n");
1407 goto error;
1408 }
1409 }
1410
1411 print_hex_dump_debug("IEQ: IEQ TX BUFFER", DUMP_PREFIX_OFFSET, 16, 8,
1412 txbuf->mem.va, txbuf->totallen, false);
1413 if (ieq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
1414 txbuf->ah_id = pfpdu->ah->ah_info.ah_idx;
1415 txbuf->do_lpb = true;
1416 irdma_puda_send_buf(ieq, txbuf);
1417 pfpdu->rcv_nxt = seqnum + fpdu_len;
1418 return status;
1419
1420error:
1421 while (!list_empty(&pbufl)) {
1422 buf = list_last_entry(&pbufl, struct irdma_puda_buf, list);
1423 list_move(&buf->list, rxlist);
1424 }
1425 if (txbuf)
1426 irdma_puda_ret_bufpool(ieq, txbuf);
1427
1428 return status;
1429}
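
/**
 * irdma_ieq_process_buf - process buffer received on the ieq
 * @ieq: ieq resource
 * @pfpdu: partial management per user qp
 * @buf: receive buffer
 */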
1437static enum irdma_status_code irdma_ieq_process_buf(struct irdma_puda_rsrc *ieq,
1438 struct irdma_pfpdu *pfpdu,
1439 struct irdma_puda_buf *buf)
1440{
1441 u16 fpdu_len = 0;
1442 u16 datalen = buf->datalen;
1443 u8 *datap = buf->data;
1444 u8 *crcptr;
1445 u16 ioffset = 0;
1446 u32 mpacrc;
1447 u32 seqnum = buf->seqnum;
1448 u16 len = 0;
1449 u16 full = 0;
1450 bool partial = false;
1451 struct irdma_puda_buf *txbuf;
1452 struct list_head *rxlist = &pfpdu->rxlist;
1453 enum irdma_status_code ret = 0;
1454
1455 ioffset = (u16)(buf->data - (u8 *)buf->mem.va);
1456 while (datalen) {
1457 fpdu_len = irdma_ieq_get_fpdu_len(pfpdu, datap, buf->seqnum);
1458 if (!fpdu_len) {
1459 ibdev_dbg(to_ibdev(ieq->dev),
1460 "IEQ: error bad fpdu len\n");
1461 list_add(&buf->list, rxlist);
1462 return IRDMA_ERR_MPA_CRC;
1463 }
1464
1465 if (datalen < fpdu_len) {
1466 partial = true;
1467 break;
1468 }
1469 crcptr = datap + fpdu_len - 4;
1470 mpacrc = *(u32 *)crcptr;
1471 if (ieq->check_crc)
1472 ret = irdma_ieq_check_mpacrc(ieq->hash_desc, datap,
1473 fpdu_len - 4, mpacrc);
1474 if (ret) {
1475 list_add(&buf->list, rxlist);
1476 ibdev_dbg(to_ibdev(ieq->dev),
1477 "ERR: IRDMA_ERR_MPA_CRC\n");
1478 return IRDMA_ERR_MPA_CRC;
1479 }
1480 full++;
1481 pfpdu->fpdu_processed++;
1482 ieq->fpdu_processed++;
1483 datap += fpdu_len;
1484 len += fpdu_len;
1485 datalen -= fpdu_len;
1486 }
1487 if (full) {
1488
1489 txbuf = irdma_puda_get_bufpool(ieq);
1490 if (!txbuf) {
1491 pfpdu->no_tx_bufs++;
1492 list_add(&buf->list, rxlist);
1493 return IRDMA_ERR_NO_TXBUFS;
1494 }
1495
1496 irdma_ieq_setup_tx_buf(buf, txbuf);
1497
1498 if (ieq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1499 irdma_ieq_copy_to_txbuf(buf, txbuf, ioffset,
1500 txbuf->hdrlen, len);
1501 txbuf->totallen = txbuf->hdrlen + len;
1502 txbuf->ah_id = pfpdu->ah->ah_info.ah_idx;
1503 } else {
1504 irdma_ieq_copy_to_txbuf(buf, txbuf, ioffset,
1505 buf->hdrlen, len);
1506 txbuf->totallen = buf->hdrlen + len;
1507 }
1508 irdma_ieq_update_tcpip_info(txbuf, len, buf->seqnum);
1509 print_hex_dump_debug("IEQ: IEQ TX BUFFER", DUMP_PREFIX_OFFSET,
1510 16, 8, txbuf->mem.va, txbuf->totallen,
1511 false);
1512 txbuf->do_lpb = true;
1513 irdma_puda_send_buf(ieq, txbuf);
1514
1515 if (!datalen) {
1516 pfpdu->rcv_nxt = buf->seqnum + len;
1517 irdma_puda_ret_bufpool(ieq, buf);
1518 return 0;
1519 }
1520 buf->data = datap;
1521 buf->seqnum = seqnum + len;
1522 buf->datalen = datalen;
1523 pfpdu->rcv_nxt = buf->seqnum;
1524 }
1525 if (partial)
1526 return irdma_ieq_handle_partial(ieq, pfpdu, buf, fpdu_len);
1527
1528 return 0;
1529}
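
/**
 * irdma_ieq_process_fpdus - process fpdu buffers on the qp's rxlist
 * @qp: qp with buffer list
 * @ieq: ieq resource
 */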
1536void irdma_ieq_process_fpdus(struct irdma_sc_qp *qp,
1537 struct irdma_puda_rsrc *ieq)
1538{
1539 struct irdma_pfpdu *pfpdu = &qp->pfpdu;
1540 struct list_head *rxlist = &pfpdu->rxlist;
1541 struct irdma_puda_buf *buf;
1542 enum irdma_status_code status;
1543
1544 do {
1545 if (list_empty(rxlist))
1546 break;
1547 buf = irdma_puda_get_listbuf(rxlist);
1548 if (!buf) {
1549 ibdev_dbg(to_ibdev(ieq->dev), "IEQ: error no buf\n");
1550 break;
1551 }
1552 if (buf->seqnum != pfpdu->rcv_nxt) {
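			/* This could be an out-of-order or missing packet */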
1554 pfpdu->out_of_order++;
1555 list_add(&buf->list, rxlist);
1556 break;
1557 }
1558
1559 status = irdma_ieq_process_buf(ieq, pfpdu, buf);
1560 if (status == IRDMA_ERR_MPA_CRC) {
1561 pfpdu->mpa_crc_err = true;
1562 while (!list_empty(rxlist)) {
1563 buf = irdma_puda_get_listbuf(rxlist);
1564 irdma_puda_ret_bufpool(ieq, buf);
1565 pfpdu->crc_err++;
1566 ieq->crc_err++;
1567 }
1568
1569 irdma_ieq_mpa_crc_ae(ieq->dev, qp);
1570 }
1571 } while (!status);
1572}
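
/**
 * irdma_ieq_create_ah - create an address handle for IEQ
 * @qp: qp pointer
 * @buf: buf received on IEQ used to create AH
 */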
1579static enum irdma_status_code irdma_ieq_create_ah(struct irdma_sc_qp *qp,
1580 struct irdma_puda_buf *buf)
1581{
1582 struct irdma_ah_info ah_info = {};
1583
1584 qp->pfpdu.ah_buf = buf;
1585 irdma_puda_ieq_get_ah_info(qp, &ah_info);
1586 return irdma_puda_create_ah(qp->vsi->dev, &ah_info, false,
1587 IRDMA_PUDA_RSRC_TYPE_IEQ, qp,
1588 &qp->pfpdu.ah);
1589}
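
/**
 * irdma_ieq_handle_exception - handle qp's exception buffer
 * @ieq: ieq resource
 * @qp: qp receiving exception
 * @buf: receive buffer
 */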
1597static void irdma_ieq_handle_exception(struct irdma_puda_rsrc *ieq,
1598 struct irdma_sc_qp *qp,
1599 struct irdma_puda_buf *buf)
1600{
1601 struct irdma_pfpdu *pfpdu = &qp->pfpdu;
1602 u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx;
1603 u32 rcv_wnd = hw_host_ctx[23];
1604
1605 u32 fps = *(u32 *)(qp->q2_buf + Q2_FPSN_OFFSET);
1606 struct list_head *rxlist = &pfpdu->rxlist;
1607 unsigned long flags = 0;
1608 u8 hw_rev = qp->dev->hw_attrs.uk_attrs.hw_rev;
1609
1610 print_hex_dump_debug("IEQ: IEQ RX BUFFER", DUMP_PREFIX_OFFSET, 16, 8,
1611 buf->mem.va, buf->totallen, false);
1612
1613 spin_lock_irqsave(&pfpdu->lock, flags);
1614 pfpdu->total_ieq_bufs++;
1615 if (pfpdu->mpa_crc_err) {
1616 pfpdu->crc_err++;
1617 goto error;
1618 }
1619 if (pfpdu->mode && fps != pfpdu->fps) {
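		/* clean up the qp as this is a new partial sequence */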
1621 irdma_ieq_cleanup_qp(ieq, qp);
1622 ibdev_dbg(to_ibdev(ieq->dev), "IEQ: restarting new partial\n");
1623 pfpdu->mode = false;
1624 }
1625
1626 if (!pfpdu->mode) {
1627 print_hex_dump_debug("IEQ: Q2 BUFFER", DUMP_PREFIX_OFFSET, 16,
1628 8, (u64 *)qp->q2_buf, 128, false);
1629
1630 pfpdu->rcv_nxt = fps;
1631 pfpdu->fps = fps;
1632 pfpdu->mode = true;
1633 pfpdu->max_fpdu_data = (buf->ipv4) ?
1634 (ieq->vsi->mtu - IRDMA_MTU_TO_MSS_IPV4) :
1635 (ieq->vsi->mtu - IRDMA_MTU_TO_MSS_IPV6);
1636 pfpdu->pmode_count++;
1637 ieq->pmode_count++;
1638 INIT_LIST_HEAD(rxlist);
1639 irdma_ieq_check_first_buf(buf, fps);
1640 }
1641
1642 if (!(rcv_wnd >= (buf->seqnum - pfpdu->rcv_nxt))) {
1643 pfpdu->bad_seq_num++;
1644 ieq->bad_seq_num++;
1645 goto error;
1646 }
1647
1648 if (!list_empty(rxlist)) {
1649 if (buf->seqnum != pfpdu->nextseqnum) {
1650 irdma_send_ieq_ack(qp);
1651
1652 goto error;
1653 }
1654 }
1655
1656 list_add_tail(&buf->list, rxlist);
1657 pfpdu->nextseqnum = buf->seqnum + buf->datalen;
1658 pfpdu->lastrcv_buf = buf;
1659 if (hw_rev >= IRDMA_GEN_2 && !pfpdu->ah) {
1660 irdma_ieq_create_ah(qp, buf);
1661 if (!pfpdu->ah)
1662 goto error;
1663 goto exit;
1664 }
1665 if (hw_rev == IRDMA_GEN_1)
1666 irdma_ieq_process_fpdus(qp, ieq);
1667 else if (pfpdu->ah && pfpdu->ah->ah_info.ah_valid)
1668 irdma_ieq_process_fpdus(qp, ieq);
1669exit:
1670 spin_unlock_irqrestore(&pfpdu->lock, flags);
1671
1672 return;
1673
1674error:
1675 irdma_puda_ret_bufpool(ieq, buf);
1676 spin_unlock_irqrestore(&pfpdu->lock, flags);
1677}
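
/**
 * irdma_ieq_receive - handle a received exception buffer
 * @vsi: VSI of device
 * @buf: exception buffer received
 */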
1684static void irdma_ieq_receive(struct irdma_sc_vsi *vsi,
1685 struct irdma_puda_buf *buf)
1686{
1687 struct irdma_puda_rsrc *ieq = vsi->ieq;
1688 struct irdma_sc_qp *qp = NULL;
1689 u32 wqe_idx = ieq->compl_rxwqe_idx;
1690
1691 qp = irdma_ieq_get_qp(vsi->dev, buf);
1692 if (!qp) {
1693 ieq->stats_bad_qp_id++;
1694 irdma_puda_ret_bufpool(ieq, buf);
1695 } else {
1696 irdma_ieq_handle_exception(ieq, qp, buf);
1697 }
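
	/* Save the completed wqe_idx so irdma_puda_replenish_rq() knows
	 * where to start reposting receive buffers.
	 */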
1702 if (!ieq->rxq_invalid_cnt)
1703 ieq->rx_wqe_idx = wqe_idx;
1704 ieq->rxq_invalid_cnt++;
1705}
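
/**
 * irdma_ieq_tx_compl - return tx buffer after a completed send
 * @vsi: sc VSI struct
 * @sqwrid: pointer to puda buffer
 */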
1712static void irdma_ieq_tx_compl(struct irdma_sc_vsi *vsi, void *sqwrid)
1713{
1714 struct irdma_puda_rsrc *ieq = vsi->ieq;
1715 struct irdma_puda_buf *buf = sqwrid;
1716
1717 irdma_puda_ret_bufpool(ieq, buf);
1718}
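
/**
 * irdma_ieq_cleanup_qp - free pending fpdu buffers as the qp is destroyed
 * @ieq: ieq resource
 * @qp: qp being destroyed
 */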
1725void irdma_ieq_cleanup_qp(struct irdma_puda_rsrc *ieq, struct irdma_sc_qp *qp)
1726{
1727 struct irdma_puda_buf *buf;
1728 struct irdma_pfpdu *pfpdu = &qp->pfpdu;
1729 struct list_head *rxlist = &pfpdu->rxlist;
1730
1731 if (qp->pfpdu.ah) {
1732 irdma_puda_free_ah(ieq->dev, qp->pfpdu.ah);
1733 qp->pfpdu.ah = NULL;
1734 qp->pfpdu.ah_buf = NULL;
1735 }
1736
1737 if (!pfpdu->mode)
1738 return;
1739
1740 while (!list_empty(rxlist)) {
1741 buf = irdma_puda_get_listbuf(rxlist);
1742 irdma_puda_ret_bufpool(ieq, buf);
1743 }
1744}
1745