#include "ena_eth_com.h"

static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
	struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 expected_phase, head_masked;
	u16 desc_phase;

	head_masked = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
			+ (head_masked * io_cq->cdesc_entry_size_in_bytes));

	desc_phase = (READ_ONCE(cdesc->status) &
		      ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
		     ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

	if (desc_phase != expected_phase)
		return NULL;

	/* Make sure we read the rest of the descriptor after the phase bit
	 * has been read
	 */
	dma_rmb();

	return cdesc;
}

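/* Example of the phase protocol (illustrative, not driver code): with
 * q_depth = 8 the completion ring has 8 slots and the mask is 0x7. On the
 * first lap the device writes cdescs with phase = 1 while io_cq->phase is
 * also 1, so they match; once head wraps past slot 7 the driver flips its
 * expected phase to 0, and any stale slot still carrying last lap's
 * phase = 1 is rejected above without any head/tail exchange with the
 * device.
 */
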
static void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
{
	u16 tail_masked;
	u32 offset;

	tail_masked = io_sq->tail & (io_sq->q_depth - 1);

	offset = tail_masked * io_sq->desc_entry_size;

	return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}

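/* Illustrative arithmetic (assumes q_depth is a power of two, which queue
 * setup guarantees): with q_depth = 256 and desc_entry_size = 16, a tail
 * of 260 masks to slot 4, so the descriptor lives at byte offset
 * 4 * 16 = 64 from desc_addr.virt_addr. Masking replaces a modulo and
 * lets tail itself grow monotonically.
 */
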
static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
					      u8 *bounce_buffer)
{
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	u16 dst_tail_mask;
	u32 dst_offset;

	dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
	dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;

	if (is_llq_max_tx_burst_exists(io_sq)) {
		if (unlikely(!io_sq->entries_in_tx_burst_left)) {
			pr_err("Error: trying to send more packets than tx burst allows\n");
			return -ENOSPC;
		}

		io_sq->entries_in_tx_burst_left--;
		pr_debug("Decreasing entries_in_tx_burst_left of queue %d to %d\n",
			 io_sq->qid, io_sq->entries_in_tx_burst_left);
	}

	/* Make sure everything was written into the bounce buffer before
	 * writing the bounce buffer to the device
	 */
	wmb();

	/* The line is completed. Copy it to dev */
	__iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
			 bounce_buffer, (llq_info->desc_list_entry_size) / 8);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return 0;
}

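/* Sketch of the sizes involved (illustrative): a common LLQ configuration
 * uses a 128-byte desc_list_entry_size, so the __iowrite64_copy() above
 * issues 128 / 8 = 16 64-bit writes of the bounce buffer into the
 * device's push memory. The wmb() ahead of it orders the CPU stores that
 * filled the bounce buffer before those MMIO writes.
 */
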
static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
					  u8 *header_src,
					  u16 header_len)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
	u16 header_offset;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return 0;

	header_offset =
		llq_info->descs_num_before_header * io_sq->desc_entry_size;

	if (unlikely((header_offset + header_len) >
		     llq_info->desc_list_entry_size)) {
		pr_err("Trying to write header larger than llq entry can accommodate\n");
		return -EFAULT;
	}

	if (unlikely(!bounce_buffer)) {
		pr_err("Bounce buffer is NULL\n");
		return -EFAULT;
	}

	memcpy(bounce_buffer + header_offset, header_src, header_len);

	return 0;
}

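/* Bounce buffer layout this function assumes (illustrative): with
 * descs_num_before_header = 2 and desc_entry_size = 16, descriptors
 * occupy bytes 0..31 of the line and the pushed packet header starts at
 * byte 32; the bounds check above rejects any header that would run past
 * desc_list_entry_size.
 */
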
static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	u8 *bounce_buffer;
	void *sq_desc;

	bounce_buffer = pkt_ctrl->curr_bounce_buf;

	if (unlikely(!bounce_buffer)) {
		pr_err("Bounce buffer is NULL\n");
		return NULL;
	}

	sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
	pkt_ctrl->idx++;
	pkt_ctrl->descs_left_in_line--;

	return sq_desc;
}

static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return 0;

	/* bounce buffer was used, so write it and get a new one */
	if (pkt_ctrl->idx) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc))
			return rc;

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);
	}

	pkt_ctrl->idx = 0;
	pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
	return 0;
}

static void *get_sq_desc(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return get_sq_desc_llq(io_sq);

	return get_sq_desc_regular_queue(io_sq);
}

static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (!pkt_ctrl->descs_left_in_line) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc))
			return rc;

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);

		pkt_ctrl->idx = 0;
		if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
			pkt_ctrl->descs_left_in_line = 1;
		else
			pkt_ctrl->descs_left_in_line =
				llq_info->desc_list_entry_size / io_sq->desc_entry_size;
	}

	return 0;
}

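/* Worked example (illustrative): in the multi-descriptor stride mode,
 * desc_list_entry_size = 128 and desc_entry_size = 16 yield
 * descs_left_in_line = 128 / 16 = 8, i.e. up to 8 descriptors are packed
 * into one LLQ line before the next flush to the device;
 * ENA_ADMIN_SINGLE_DESC_PER_ENTRY forces a flush after every descriptor.
 */
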
static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return ena_com_sq_update_llq_tail(io_sq);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return 0;
}

static struct ena_eth_io_rx_cdesc_base *
	ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
	idx &= (io_cq->q_depth - 1);
	return (struct ena_eth_io_rx_cdesc_base *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		 idx * io_cq->cdesc_entry_size_in_bytes);
}

static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
				    u16 *first_cdesc_idx)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 count = 0, head_masked;
	u32 last = 0;

	do {
		cdesc = ena_com_get_next_rx_cdesc(io_cq);
		if (!cdesc)
			break;

		ena_com_cq_inc_head(io_cq);
		count++;
		last = (READ_ONCE(cdesc->status) &
			ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
		       ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
	} while (!last);

	if (last) {
		*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
		count += io_cq->cur_rx_pkt_cdesc_count;

		head_masked = io_cq->head & (io_cq->q_depth - 1);

		io_cq->cur_rx_pkt_cdesc_count = 0;
		io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;

		pr_debug("ENA q_id: %d packets were completed. first desc idx %u descs# %d\n",
			 io_cq->qid, *first_cdesc_idx, count);
	} else {
		io_cq->cur_rx_pkt_cdesc_count += count;
		count = 0;
	}

	return count;
}

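/* Illustrative flow: if a 3-cdesc packet has only 2 cdescs posted when
 * this runs, the loop consumes both, LAST is never seen, so the partial
 * count is folded into cur_rx_pkt_cdesc_count and 0 is returned. A later
 * call finds the final cdesc with LAST set and returns 3, with
 * *first_cdesc_idx pointing at the saved start of the packet.
 */
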
static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
			       struct ena_com_tx_meta *ena_meta)
{
	struct ena_eth_io_tx_meta_desc *meta_desc = NULL;

	meta_desc = get_sq_desc(io_sq);
	if (unlikely(!meta_desc))
		return -EFAULT;

	memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;

	/* bits 0-9 of the mss */
	meta_desc->word2 |= ((u32)ena_meta->mss <<
		ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
	/* bits 10-13 of the mss */
	meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
		ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;

	/* Extended meta desc */
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
	meta_desc->len_ctrl |= ((u32)io_sq->phase <<
		ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_PHASE_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;

	meta_desc->word2 |= ena_meta->l3_hdr_len &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
	meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;

	meta_desc->word2 |= ((u32)ena_meta->l4_hdr_len <<
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;

	return ena_com_sq_update_tail(io_sq);
}

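/* Worked example of the MSS split (illustrative): mss = 700 fits in bits
 * 0-9, so MSS_LO carries 700 and MSS_HI carries 700 >> 10 = 0. An mss of
 * 9000 (0x2328) splits into MSS_LO = 0x328 (808) in word2 and
 * MSS_HI = 8 in len_ctrl.
 */
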
static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
						 struct ena_com_tx_ctx *ena_tx_ctx,
						 bool *have_meta)
{
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	/* When disable meta caching is set, don't bother to save the meta and
	 * compare it to the stored version, just create the meta
	 */
	if (io_sq->disable_meta_caching) {
		if (unlikely(!ena_tx_ctx->meta_valid))
			return -EINVAL;

		*have_meta = true;
		return ena_com_create_meta(io_sq, ena_meta);
	}

	if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) {
		*have_meta = true;
		/* Cache the meta desc */
		memcpy(&io_sq->cached_tx_meta, ena_meta,
		       sizeof(struct ena_com_tx_meta));
		return ena_com_create_meta(io_sq, ena_meta);
	}

	*have_meta = false;
	return 0;
}

static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
				 struct ena_eth_io_rx_cdesc_base *cdesc)
{
	ena_rx_ctx->l3_proto = cdesc->status &
		ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
	ena_rx_ctx->l4_proto =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
	ena_rx_ctx->l3_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
		   ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
		   ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_checked =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >>
		   ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT);
	ena_rx_ctx->hash = cdesc->hash;
	ena_rx_ctx->frag =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;

	pr_debug("l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
		 ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto,
		 ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err,
		 ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
}

int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc)
{
	struct ena_eth_io_tx_desc *desc = NULL;
	struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
	void *buffer_to_push = ena_tx_ctx->push_header;
	u16 header_len = ena_tx_ctx->header_len;
	u16 num_bufs = ena_tx_ctx->num_bufs;
	u16 start_tail = io_sq->tail;
	int i, rc;
	bool have_meta;
	u64 addr_hi;

	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type");

	/* num_bufs +1 for potential meta desc */
	if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
		pr_debug("Not enough space in the tx queue\n");
		return -ENOMEM;
	}

	if (unlikely(header_len > io_sq->tx_max_header_size)) {
		pr_err("Header size is too large %d max header: %d\n",
		       header_len, io_sq->tx_max_header_size);
		return -EINVAL;
	}

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
		     !buffer_to_push))
		return -EINVAL;

	rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
	if (unlikely(rc))
		return rc;

	rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
	if (unlikely(rc)) {
		pr_err("Failed to create and store tx meta desc\n");
		return rc;
	}

	/* If the caller doesn't want to send packets */
	if (unlikely(!num_bufs && !header_len)) {
		rc = ena_com_close_bounce_buffer(io_sq);
		*nb_hw_desc = io_sq->tail - start_tail;
		return rc;
	}

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return -EFAULT;
	memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

	/* Set first desc when we don't have meta descriptor */
	if (!have_meta)
		desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;

	desc->buff_addr_hi_hdr_sz |= ((u32)header_len <<
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
	desc->len_ctrl |= ((u32)io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_DESC_PHASE_MASK;

	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;

	/* Bits 0-9 */
	desc->meta_ctrl |= ((u32)ena_tx_ctx->req_id <<
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;

	desc->meta_ctrl |= (ena_tx_ctx->df <<
		ENA_ETH_IO_TX_DESC_DF_SHIFT) &
		ENA_ETH_IO_TX_DESC_DF_MASK;

	/* Bits 10-15 */
	desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;

	if (ena_tx_ctx->meta_valid) {
		desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
			ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
		desc->meta_ctrl |= ena_tx_ctx->l3_proto &
			ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
	}

	for (i = 0; i < num_bufs; i++) {
		/* The first desc share the same desc as the header */
		if (likely(i != 0)) {
			rc = ena_com_sq_update_tail(io_sq);
			if (unlikely(rc))
				return rc;

			desc = get_sq_desc(io_sq);
			if (unlikely(!desc))
				return -EFAULT;

			memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

			desc->len_ctrl |= ((u32)io_sq->phase <<
				ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
				ENA_ETH_IO_TX_DESC_PHASE_MASK;
		}

		desc->len_ctrl |= ena_bufs->len &
			ENA_ETH_IO_TX_DESC_LENGTH_MASK;

		addr_hi = ((ena_bufs->paddr &
			GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

		desc->buff_addr_lo = (u32)ena_bufs->paddr;
		desc->buff_addr_hi_hdr_sz |= addr_hi &
			ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
		ena_bufs++;
	}

	/* set the last desc indicator */
	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;

	rc = ena_com_sq_update_tail(io_sq);
	if (unlikely(rc))
		return rc;

	rc = ena_com_close_bounce_buffer(io_sq);

	*nb_hw_desc = io_sq->tail - start_tail;
	return rc;
}

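/* Typical caller shape (illustrative sketch with caller-side names, not
 * from this file): the netdev TX path fills a struct ena_com_tx_ctx and
 * rings the doorbell after a successful prepare, e.g.
 *
 *	int nb_hw_desc;
 *
 *	rc = ena_com_prepare_tx(io_sq, &tx_ctx, &nb_hw_desc);
 *	if (unlikely(rc))
 *		goto drop;
 *	ena_com_write_sq_doorbell(io_sq);
 *
 * nb_hw_desc reports how many SQ entries the packet consumed (data
 * descriptors plus an optional meta descriptor), which the caller needs
 * in order to reclaim space on TX completion.
 */
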
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx)
{
	struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
	struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
	u16 q_depth = io_cq->q_depth;
	u16 cdesc_idx = 0;
	u16 nb_hw_desc;
	u16 i = 0;

	WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");

	nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
	if (nb_hw_desc == 0) {
		ena_rx_ctx->descs = nb_hw_desc;
		return 0;
	}

	pr_debug("Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
		 nb_hw_desc);

	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
		pr_err("Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc,
		       ena_rx_ctx->max_bufs);
		return -ENOSPC;
	}

	cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx);
	ena_rx_ctx->pkt_offset = cdesc->offset;

	do {
		ena_buf[i].len = cdesc->length;
		ena_buf[i].req_id = cdesc->req_id;
		if (unlikely(ena_buf[i].req_id >= q_depth))
			return -EIO;

		if (++i >= nb_hw_desc)
			break;

		cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);
	} while (1);

	/* Update SQ head ptr */
	io_sq->next_to_comp += nb_hw_desc;

	pr_debug("[%s][QID#%d] Updating SQ head to: %d\n", __func__, io_sq->qid,
		 io_sq->next_to_comp);

	/* Get rx flags from the last pkt */
	ena_com_rx_set_flags(ena_rx_ctx, cdesc);

	ena_rx_ctx->descs = nb_hw_desc;
	return 0;
}

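/* Typical caller shape (illustrative sketch, not from this file): the RX
 * poll loop calls this until descs comes back 0, e.g.
 *
 *	struct ena_com_rx_ctx rx_ctx = { .ena_bufs = bufs, .max_bufs = n };
 *
 *	rc = ena_com_rx_pkt(io_cq, io_sq, &rx_ctx);
 *	if (rc || rx_ctx.descs == 0)
 *		break;
 *	// build an skb from rx_ctx.ena_bufs[0..rx_ctx.descs - 1]
 *
 * where bufs and n are the caller's descriptor array and its size.
 */
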
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id)
{
	struct ena_eth_io_rx_desc *desc;

	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");

	if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
		return -ENOSPC;

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return -EFAULT;

	memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));

	desc->length = ena_buf->len;

	desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK |
		     ENA_ETH_IO_RX_DESC_LAST_MASK |
		     (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK) |
		     ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;

	desc->req_id = req_id;

	desc->buff_addr_lo = (u32)ena_buf->paddr;
	desc->buff_addr_hi =
		((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

	return ena_com_sq_update_tail(io_sq);
}

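/* Typical refill pattern (illustrative sketch, not from this file): the
 * caller posts one descriptor per free buffer and rings the doorbell once
 * at the end, e.g.
 *
 *	for (i = 0; i < budget; i++) {
 *		rc = ena_com_add_single_rx_desc(io_sq, &bufs[i], ids[i]);
 *		if (unlikely(rc))
 *			break;
 *	}
 *	if (likely(i))
 *		ena_com_write_sq_doorbell(io_sq);
 *
 * bufs, ids and budget are hypothetical caller-side names.
 */
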
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;

	cdesc = ena_com_get_next_rx_cdesc(io_cq);

	return !cdesc;
}