#include "ena_eth_com.h"

static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
	struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 expected_phase, head_masked;
	u16 desc_phase;

	head_masked = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
			+ (head_masked * io_cq->cdesc_entry_size_in_bytes));

	desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

	if (desc_phase != expected_phase)
		return NULL;

	return cdesc;
}

static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
{
	io_cq->head++;

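	/* Switch phase bit in case of wrap around */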
	if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
		io_cq->phase ^= 1;
}

static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
{
	u16 tail_masked;
	u32 offset;

	tail_masked = io_sq->tail & (io_sq->q_depth - 1);

	offset = tail_masked * io_sq->desc_entry_size;

	return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}

static inline void ena_com_copy_curr_sq_desc_to_dev(struct ena_com_io_sq *io_sq)
{
	u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
	u32 offset = tail_masked * io_sq->desc_entry_size;

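	/* Host-memory queues are fetched by the device via DMA; only
	 * device-memory (LLQ) placement needs an explicit copy.
	 */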
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return;

	memcpy_toio(io_sq->desc_addr.pbuf_dev_addr + offset,
		    io_sq->desc_addr.virt_addr + offset,
		    io_sq->desc_entry_size);
}

static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
	io_sq->tail++;

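	/* Switch phase bit in case of wrap around */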
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;
}

static inline int ena_com_write_header(struct ena_com_io_sq *io_sq,
				       u8 *head_src, u16 header_len)
{
	u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
	u8 __iomem *dev_head_addr =
		io_sq->header_addr + (tail_masked * io_sq->tx_max_header_size);

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
		return 0;

	if (unlikely(!io_sq->header_addr)) {
		pr_err("Push buffer header ptr is NULL\n");
		return -EINVAL;
	}

	memcpy_toio(dev_head_addr, head_src, header_len);

	return 0;
}

static inline struct ena_eth_io_rx_cdesc_base *
	ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
	idx &= (io_cq->q_depth - 1);
	return (struct ena_eth_io_rx_cdesc_base *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
			idx * io_cq->cdesc_entry_size_in_bytes);
}

static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
					   u16 *first_cdesc_idx)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 count = 0, head_masked;
	u32 last = 0;

	do {
		cdesc = ena_com_get_next_rx_cdesc(io_cq);
		if (!cdesc)
			break;

		ena_com_cq_inc_head(io_cq);
		count++;
		last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
			ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
	} while (!last);

	if (last) {
		*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
		count += io_cq->cur_rx_pkt_cdesc_count;

		head_masked = io_cq->head & (io_cq->q_depth - 1);

		io_cq->cur_rx_pkt_cdesc_count = 0;
		io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;

		pr_debug("ena q_id: %d packets were completed. first desc idx %u descs# %d\n",
			 io_cq->qid, *first_cdesc_idx, count);
	} else {
		io_cq->cur_rx_pkt_cdesc_count += count;
		count = 0;
	}

	return count;
}

static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
					     struct ena_com_tx_ctx *ena_tx_ctx)
{
	int rc;

	if (ena_tx_ctx->meta_valid) {
		rc = memcmp(&io_sq->cached_tx_meta,
			    &ena_tx_ctx->ena_meta,
			    sizeof(struct ena_com_tx_meta));

		if (unlikely(rc != 0))
			return true;
	}

	return false;
}

static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
							  struct ena_com_tx_ctx *ena_tx_ctx)
{
	struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	meta_desc = get_sq_desc(io_sq);
	memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;

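	/* bits 0-9 of the mss */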
	meta_desc->word2 |= (ena_meta->mss <<
		ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
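	/* bits 10-13 of the mss */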
	meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
		ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;

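	/* Extended meta desc */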
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
	meta_desc->len_ctrl |= (io_sq->phase <<
		ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_PHASE_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
	meta_desc->word2 |= ena_meta->l3_hdr_len &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
	meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;

	meta_desc->word2 |= (ena_meta->l4_hdr_len <<
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;

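	/* Cache the metadata so ena_com_meta_desc_changed() can skip
	 * re-sending an identical meta descriptor.
	 */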
	memcpy(&io_sq->cached_tx_meta, ena_meta,
	       sizeof(struct ena_com_tx_meta));

	ena_com_copy_curr_sq_desc_to_dev(io_sq);
	ena_com_sq_update_tail(io_sq);
}

static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
					struct ena_eth_io_rx_cdesc_base *cdesc)
{
	ena_rx_ctx->l3_proto = cdesc->status &
		ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
	ena_rx_ctx->l4_proto =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
	ena_rx_ctx->l3_csum_err =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT;
	ena_rx_ctx->l4_csum_err =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT;
	ena_rx_ctx->hash = cdesc->hash;
	ena_rx_ctx->frag =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;

	pr_debug("ena_rx_ctx->l3_proto %d ena_rx_ctx->l4_proto %d\nena_rx_ctx->l3_csum_err %d ena_rx_ctx->l4_csum_err %d\nhash: %x frag: %d cdesc_status: %x\n",
		 ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto,
		 ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err,
		 ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
}
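
/*****************************************************************************/
/*****************************     API      **********************************/
/*****************************************************************************/
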
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc)
{
	struct ena_eth_io_tx_desc *desc = NULL;
	struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
	void *push_header = ena_tx_ctx->push_header;
	u16 header_len = ena_tx_ctx->header_len;
	u16 num_bufs = ena_tx_ctx->num_bufs;
	int total_desc, i, rc;
	bool have_meta;
	u64 addr_hi;

	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type");

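	/* num_bufs + 1 to account for a potential meta descriptor */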
	if (ena_com_sq_empty_space(io_sq) < (num_bufs + 1)) {
		pr_err("Not enough space in the tx queue\n");
		return -ENOMEM;
	}

	if (unlikely(header_len > io_sq->tx_max_header_size)) {
		pr_err("header size is too large %d max header: %d\n",
		       header_len, io_sq->tx_max_header_size);
		return -EINVAL;
	}

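	/* Copy the push header to the device header space (a no-op for
	 * host-memory queues).
	 */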
	rc = ena_com_write_header(io_sq, push_header, header_len);
	if (unlikely(rc))
		return rc;

	have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq,
			ena_tx_ctx);
	if (have_meta)
		ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);

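	/* The caller has nothing to send; only the meta descriptor (if any)
	 * was queued, so report just that.
	 */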
	if (unlikely(!num_bufs && !header_len)) {
		*nb_hw_desc = have_meta ? 1 : 0;
		return 0;
	}

	desc = get_sq_desc(io_sq);
	memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

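	/* Set first desc bit only when there is no preceding meta descriptor */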
	if (!have_meta)
		desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;

	desc->buff_addr_hi_hdr_sz |= (header_len <<
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
	desc->len_ctrl |= (io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_DESC_PHASE_MASK;

	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;

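	/* Bits 0-9 of the req_id */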
	desc->meta_ctrl |= (ena_tx_ctx->req_id <<
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;

	desc->meta_ctrl |= (ena_tx_ctx->df <<
		ENA_ETH_IO_TX_DESC_DF_SHIFT) &
		ENA_ETH_IO_TX_DESC_DF_MASK;

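	/* Bits 10-15 of the req_id */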
	desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;

	if (ena_tx_ctx->meta_valid) {
		desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
			ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
		desc->meta_ctrl |= ena_tx_ctx->l3_proto &
			ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
	}

	for (i = 0; i < num_bufs; i++) {
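		/* The first buffer shares the descriptor prepared above;
		 * each later buffer flushes the previous descriptor and
		 * starts a fresh one.
		 */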
		if (likely(i != 0)) {
			ena_com_copy_curr_sq_desc_to_dev(io_sq);
			ena_com_sq_update_tail(io_sq);

			desc = get_sq_desc(io_sq);
			memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

			desc->len_ctrl |= (io_sq->phase <<
				ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
				ENA_ETH_IO_TX_DESC_PHASE_MASK;
		}

		desc->len_ctrl |= ena_bufs->len &
			ENA_ETH_IO_TX_DESC_LENGTH_MASK;

		addr_hi = ((ena_bufs->paddr &
			GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

		desc->buff_addr_lo = (u32)ena_bufs->paddr;
		desc->buff_addr_hi_hdr_sz |= addr_hi &
			ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
		ena_bufs++;
	}

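	/* set the last desc indicator */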
	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;

	ena_com_copy_curr_sq_desc_to_dev(io_sq);

	ena_com_sq_update_tail(io_sq);

	total_desc = max_t(u16, num_bufs, 1);
	total_desc += have_meta ? 1 : 0;

	*nb_hw_desc = total_desc;
	return 0;
}

int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx)
{
	struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
	struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
	u16 cdesc_idx = 0;
	u16 nb_hw_desc;
	u16 i;

	WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");

	nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
	if (nb_hw_desc == 0) {
		ena_rx_ctx->descs = nb_hw_desc;
		return 0;
	}

	pr_debug("fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
		 nb_hw_desc);

	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
		pr_err("Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc,
		       ena_rx_ctx->max_bufs);
		return -ENOSPC;
	}

	for (i = 0; i < nb_hw_desc; i++) {
		cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);

		ena_buf->len = cdesc->length;
		ena_buf->req_id = cdesc->req_id;
		ena_buf++;
	}

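	/* Update SQ head */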
	io_sq->next_to_comp += nb_hw_desc;

	pr_debug("[%s][QID#%d] Updating SQ head to: %d\n", __func__, io_sq->qid,
		 io_sq->next_to_comp);

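	/* Get the RX flags from the last cdesc of the packet */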
	ena_com_rx_set_flags(ena_rx_ctx, cdesc);

	ena_rx_ctx->descs = nb_hw_desc;
	return 0;
}

int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id)
{
	struct ena_eth_io_rx_desc *desc;

	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");

	if (unlikely(ena_com_sq_empty_space(io_sq) == 0))
		return -ENOSPC;

	desc = get_sq_desc(io_sq);
	memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));

	desc->length = ena_buf->len;

	desc->ctrl |= ENA_ETH_IO_RX_DESC_FIRST_MASK;
	desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK;
	desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK;
	desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;

	desc->req_id = req_id;

	desc->buff_addr_lo = (u32)ena_buf->paddr;
	desc->buff_addr_hi =
		((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

	ena_com_sq_update_tail(io_sq);

	return 0;
}

int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
{
	u8 expected_phase, cdesc_phase;
	struct ena_eth_io_tx_cdesc *cdesc;
	u16 masked_head;

	masked_head = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_tx_cdesc *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		(masked_head * io_cq->cdesc_entry_size_in_bytes));

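	/* When the current completion descriptor's phase doesn't match the
	 * expected one, the device hasn't written this completion yet.
	 */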
	cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
	if (cdesc_phase != expected_phase)
		return -EAGAIN;

	ena_com_cq_inc_head(io_cq);

	*req_id = READ_ONCE(cdesc->req_id);

	return 0;
}