/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#ifndef __CN10K_WORKER_H__
#define __CN10K_WORKER_H__

#include <rte_vect.h>

#include "cn10k_cryptodev_ops.h"
#include "cnxk_ethdev.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"

#include "cn10k_ethdev.h"
#include "cn10k_rx.h"
#include "cn10k_tx.h"

/* SSO Operations */

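/* Inject a new event into the SSO via add-work, provided the XAQ
 * in-flight count is below the limit. Returns 1 on success, 0 if the
 * XAQ threshold is reached and the caller must retry.
 */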
static __rte_always_inline uint8_t
cn10k_sso_hws_new_event(struct cn10k_sso_hws *ws, const struct rte_event *ev)
{
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;
	const uint64_t event_ptr = ev->u64;
	const uint16_t grp = ev->queue_id;

	rte_atomic_thread_fence(__ATOMIC_ACQ_REL);
	if (ws->xaq_lmt <= *ws->fc_mem)
		return 0;

	cnxk_sso_hws_add_work(event_ptr, tag, new_tt,
			      ws->grp_base + (grp << 12));
	return 1;
}

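/* Forward within the same group: switch the tag in place, using
 * SWTAG_UNTAG when moving to SSO_TT_UNTAGGED and SWTAG_NORM otherwise,
 * and record the pending tag switch in swtag_req.
 */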
static __rte_always_inline void
cn10k_sso_hws_fwd_swtag(struct cn10k_sso_hws *ws, const struct rte_event *ev)
{
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;
	const uint8_t cur_tt =
		CNXK_TT_FROM_TAG(plt_read64(ws->base + SSOW_LF_GWS_WQE0));

	/* CNXK model
	 * cur_tt/new_tt     SSO_TT_ORDERED SSO_TT_ATOMIC SSO_TT_UNTAGGED
	 *
	 * SSO_TT_ORDERED        norm           norm           untag
	 * SSO_TT_ATOMIC         norm           norm           untag
	 * SSO_TT_UNTAGGED       norm           norm           NOOP
	 */
	if (new_tt == SSO_TT_UNTAGGED) {
		if (cur_tt != SSO_TT_UNTAGGED)
			cnxk_sso_hws_swtag_untag(ws->base +
						 SSOW_LF_GWS_OP_SWTAG_UNTAG);
	} else {
		cnxk_sso_hws_swtag_norm(tag, new_tt,
					ws->base + SSOW_LF_GWS_OP_SWTAG_NORM);
	}
	ws->swtag_req = 1;
}

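/* Forward to a different group: update the work-queue pointer for the
 * target group, then deschedule with a tag switch so the event is
 * rescheduled to that group.
 */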
static __rte_always_inline void
cn10k_sso_hws_fwd_group(struct cn10k_sso_hws *ws, const struct rte_event *ev,
			const uint16_t grp)
{
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;

	plt_write64(ev->u64, ws->base + SSOW_LF_GWS_OP_UPD_WQP_GRP1);
	cnxk_sso_hws_swtag_desched(tag, new_tt, grp,
				   ws->base + SSOW_LF_GWS_OP_SWTAG_DESCHED);
}

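/* Forward an event: pick an in-place tag switch when the destination
 * group matches the current one, else a deschedule based transfer.
 */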
static __rte_always_inline void
cn10k_sso_hws_forward_event(struct cn10k_sso_hws *ws,
			    const struct rte_event *ev)
{
	const uint8_t grp = ev->queue_id;

	/* Group hasn't changed, use SWTAG to forward the event */
	if (CNXK_GRP_FROM_TAG(plt_read64(ws->base + SSOW_LF_GWS_WQE0)) == grp)
		cn10k_sso_hws_fwd_swtag(ws, ev);
	else
		/*
		 * Group has been changed for group based work pipelining,
		 * use deschedule/add_work operation to transfer the event to
		 * the new group/core.
		 */
		cn10k_sso_hws_fwd_group(ws, ev, grp);
}

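/* Convert a NIX WQE into its associated mbuf. mbuf_init packs the data
 * offset (headroom, plus 8 bytes when timestamping is enabled) in the
 * low bits and the Rx port in bits 63:48.
 */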
static __rte_always_inline void
cn10k_wqe_to_mbuf(uint64_t wqe, const uint64_t mbuf, uint8_t port_id,
		  const uint32_t tag, const uint32_t flags,
		  const void *const lookup_mem)
{
	const uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM |
				   (flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);

	cn10k_nix_cqe_to_mbuf((struct nix_cqe_hdr_s *)wqe, tag,
			      (struct rte_mbuf *)mbuf, lookup_mem,
			      mbuf_init | ((uint64_t)port_id) << 48, flags);
}

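/* Process an event vector WQE: multiples of NIX_DESCS_PER_LOOP go
 * through the vector Rx path, the remainder through the scalar
 * CQE-to-mbuf path, translating inline IPsec meta packets when
 * NIX_RX_OFFLOAD_SECURITY_F is set.
 */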
static __rte_always_inline void
cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
		   void *lookup_mem, void *tstamp, uintptr_t lbase)
{
	uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM |
			     (flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);
	struct rte_event_vector *vec;
	uint64_t aura_handle, laddr;
	uint16_t nb_mbufs, non_vec;
	uint16_t lmt_id, d_off;
	struct rte_mbuf *mbuf;
	uint8_t loff = 0;
	uint64_t sa_base;
	uint64_t **wqe;

	mbuf_init |= ((uint64_t)port_id) << 48;
	vec = (struct rte_event_vector *)vwqe;
	wqe = vec->u64s;

	nb_mbufs = RTE_ALIGN_FLOOR(vec->nb_elem, NIX_DESCS_PER_LOOP);
	nb_mbufs = cn10k_nix_recv_pkts_vector(&mbuf_init, vec->mbufs, nb_mbufs,
					      flags | NIX_RX_VWQE_F, lookup_mem,
					      tstamp, lbase);
	wqe += nb_mbufs;
	non_vec = vec->nb_elem - nb_mbufs;

	if (flags & NIX_RX_OFFLOAD_SECURITY_F && non_vec) {
		mbuf = (struct rte_mbuf *)((uintptr_t)wqe[0] -
					   sizeof(struct rte_mbuf));

		/* Pick first mbuf's aura handle assuming all
		 * mbufs are from a vec and are from same RQ.
		 */
		aura_handle = mbuf->pool->pool_id;
		ROC_LMT_BASE_ID_GET(lbase, lmt_id);
		laddr = lbase;
		laddr += 8;
		d_off = ((uintptr_t)mbuf->buf_addr - (uintptr_t)mbuf);
		d_off += (mbuf_init & 0xFFFF);
		sa_base = cnxk_nix_sa_base_get(mbuf_init >> 48, lookup_mem);
		sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
	}

	while (non_vec) {
		struct nix_cqe_hdr_s *cqe = (struct nix_cqe_hdr_s *)wqe[0];
		uint64_t tstamp_ptr;

		mbuf = (struct rte_mbuf *)((char *)cqe -
					   sizeof(struct rte_mbuf));

		/* Translate meta to mbuf */
		if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
			const uint64_t cq_w1 = *((const uint64_t *)cqe + 1);

			mbuf = nix_sec_meta_to_mbuf_sc(cq_w1, sa_base, laddr,
						       &loff, mbuf, d_off);
		}

		cn10k_nix_cqe_to_mbuf(cqe, cqe->tag, mbuf, lookup_mem,
				      mbuf_init, flags);

		tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)cqe) +
					   CNXK_SSO_WQE_SG_PTR);
		cnxk_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp,
					flags & NIX_RX_OFFLOAD_TSTAMP_F,
					flags & NIX_RX_MULTI_SEG_F,
					(uint64_t *)tstamp_ptr);
		wqe[0] = (uint64_t *)mbuf;
		non_vec--;
		wqe++;
	}

	/* Free remaining meta buffers if any */
	if (flags & NIX_RX_OFFLOAD_SECURITY_F && loff) {
		nix_sec_flush_meta(laddr, lmt_id, loff, aura_handle);
		plt_io_wmb();
	}
}

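/* Fetch work from the SSO GWS. On arm64 (non-clang builds) a single
 * CASP issues GET_WORK and reads back the {tag, WQP} pair atomically;
 * the fallback writes GET_WORK0 and polls WQE0 until the pending bit
 * (bit 63) clears. The 0x80 subtraction is sizeof(struct rte_mbuf):
 * the mbuf header sits that far in front of the WQE. Returns non-zero
 * when an event was received.
 */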
static __rte_always_inline uint16_t
cn10k_sso_hws_get_work(struct cn10k_sso_hws *ws, struct rte_event *ev,
		       const uint32_t flags, void *lookup_mem)
{
	union {
		__uint128_t get_work;
		uint64_t u64[2];
	} gw;
	uint64_t tstamp_ptr;
	uint64_t mbuf;

	gw.get_work = ws->gw_wdata;
#if defined(RTE_ARCH_ARM64) && !defined(__clang__)
	asm volatile(
		PLT_CPU_FEATURE_PREAMBLE
		"caspl %[wdata], %H[wdata], %[wdata], %H[wdata], [%[gw_loc]]\n"
		"sub %[mbuf], %H[wdata], #0x80				\n"
		: [wdata] "+r"(gw.get_work), [mbuf] "=&r"(mbuf)
		: [gw_loc] "r"(ws->base + SSOW_LF_GWS_OP_GET_WORK0)
		: "memory");
#else
	plt_write64(gw.u64[0], ws->base + SSOW_LF_GWS_OP_GET_WORK0);
	do {
		roc_load_pair(gw.u64[0], gw.u64[1],
			      ws->base + SSOW_LF_GWS_WQE0);
	} while (gw.u64[0] & BIT_ULL(63));
	mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif
	/* Move the tt and grp fields of the GWS tag word to their
	 * sched_type and queue_id positions in the rte_event layout.
	 */
	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
		    (gw.u64[0] & 0xffffffff);

	if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
		if ((flags & CPT_RX_WQE_F) &&
		    (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
		     RTE_EVENT_TYPE_CRYPTODEV)) {
			gw.u64[1] = cn10k_cpt_crypto_adapter_dequeue(gw.u64[1]);
		} else if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
			   RTE_EVENT_TYPE_ETHDEV) {
			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);

			if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
				struct rte_mbuf *m;
				uintptr_t sa_base;
				uint64_t iova = 0;
				uint8_t loff = 0;
				uint16_t d_off;
				uint64_t cq_w1;

				m = (struct rte_mbuf *)mbuf;
				d_off = (uintptr_t)(m->buf_addr) - (uintptr_t)m;
				d_off += RTE_PKTMBUF_HEADROOM;

				cq_w1 = *(uint64_t *)(gw.u64[1] + 8);

				sa_base =
					cnxk_nix_sa_base_get(port, lookup_mem);
				sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);

				mbuf = (uint64_t)nix_sec_meta_to_mbuf_sc(
					cq_w1, sa_base, (uintptr_t)&iova, &loff,
					(struct rte_mbuf *)mbuf, d_off);
				/* Free the meta buffer if it was consumed */
				if (loff)
					roc_npa_aura_op_free(m->pool->pool_id,
							     0, iova);
			}

			gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
			cn10k_wqe_to_mbuf(gw.u64[1], mbuf, port,
					  gw.u64[0] & 0xFFFFF, flags,
					  lookup_mem);

			tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)
							   gw.u64[1]) +
						   CNXK_SSO_WQE_SG_PTR);
			cnxk_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf,
						ws->tstamp,
						flags & NIX_RX_OFFLOAD_TSTAMP_F,
						flags & NIX_RX_MULTI_SEG_F,
						(uint64_t *)tstamp_ptr);
			gw.u64[1] = mbuf;
		} else if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
			   RTE_EVENT_TYPE_ETHDEV_VECTOR) {
			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
			__uint128_t vwqe_hdr = *(__uint128_t *)gw.u64[1];

			vwqe_hdr = ((vwqe_hdr >> 64) & 0xFFF) | BIT_ULL(31) |
				   ((vwqe_hdr & 0xFFFF) << 48) |
				   ((uint64_t)port << 32);
			*(uint64_t *)gw.u64[1] = (uint64_t)vwqe_hdr;
			cn10k_process_vwqe(gw.u64[1], port, flags, lookup_mem,
					   ws->tstamp, ws->lmt_base);
		}
	}

	ev->event = gw.u64[0];
	ev->u64 = gw.u64[1];

	return !!gw.u64[1];
}

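/* Wait for and return any work already attached to the workslot without
 * issuing a fresh GET_WORK; used when draining or flushing a workslot.
 * Rx offload translation is skipped on this path (flags = 0).
 */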
static __rte_always_inline uint16_t
cn10k_sso_hws_get_work_empty(struct cn10k_sso_hws *ws, struct rte_event *ev)
{
	union {
		__uint128_t get_work;
		uint64_t u64[2];
	} gw;
	uint64_t mbuf;

#ifdef RTE_ARCH_ARM64
	asm volatile(PLT_CPU_FEATURE_PREAMBLE
		     "		ldp %[tag], %[wqp], [%[tag_loc]]	\n"
		     "		tbz %[tag], 63, done%=			\n"
		     "		sevl					\n"
		     "rty%=:	wfe					\n"
		     "		ldp %[tag], %[wqp], [%[tag_loc]]	\n"
		     "		tbnz %[tag], 63, rty%=			\n"
		     "done%=:	dmb ld					\n"
		     "		sub %[mbuf], %[wqp], #0x80		\n"
		     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
		       [mbuf] "=&r"(mbuf)
		     : [tag_loc] "r"(ws->base + SSOW_LF_GWS_WQE0)
		     : "memory");
#else
	do {
		roc_load_pair(gw.u64[0], gw.u64[1],
			      ws->base + SSOW_LF_GWS_WQE0);
	} while (gw.u64[0] & BIT_ULL(63));
	mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif

	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
		    (gw.u64[0] & 0xffffffff);

	if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
		if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
		    RTE_EVENT_TYPE_ETHDEV) {
			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);

			gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
			cn10k_wqe_to_mbuf(gw.u64[1], mbuf, port,
					  gw.u64[0] & 0xFFFFF, 0, NULL);
			gw.u64[1] = mbuf;
		}
	}

	ev->event = gw.u64[0];
	ev->u64 = gw.u64[1];

	return !!gw.u64[1];
}

/* CN10K Fastpath functions. */
uint16_t __rte_hot cn10k_sso_hws_enq(void *port, const struct rte_event *ev);
uint16_t __rte_hot cn10k_sso_hws_enq_burst(void *port,
					   const struct rte_event ev[],
					   uint16_t nb_events);
uint16_t __rte_hot cn10k_sso_hws_enq_new_burst(void *port,
					       const struct rte_event ev[],
					       uint16_t nb_events);
uint16_t __rte_hot cn10k_sso_hws_enq_fwd_burst(void *port,
					       const struct rte_event ev[],
					       uint16_t nb_events);
uint16_t __rte_hot cn10k_sso_hws_ca_enq(void *port, struct rte_event ev[],
					uint16_t nb_events);

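/* Rx fastpath dequeue declarations, one set per offload combination in
 * NIX_RX_FASTPATH_MODES. As a sketch (the mode name "foo" is
 * illustrative, not a real table entry), R(foo, ...) expands to:
 *
 *   uint16_t cn10k_sso_hws_deq_foo(void *port, struct rte_event *ev,
 *                                  uint64_t timeout_ticks);
 *   uint16_t cn10k_sso_hws_deq_burst_foo(...);
 *
 * and likewise for the tmo, ca and seg variants below.
 */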
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
	uint16_t __rte_hot cn10k_sso_hws_deq_##name(                           \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn10k_sso_hws_deq_burst_##name(                     \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn10k_sso_hws_deq_tmo_##name(                       \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn10k_sso_hws_deq_tmo_burst_##name(                 \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn10k_sso_hws_deq_ca_##name(                        \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn10k_sso_hws_deq_ca_burst_##name(                  \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn10k_sso_hws_deq_seg_##name(                       \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn10k_sso_hws_deq_seg_burst_##name(                 \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn10k_sso_hws_deq_tmo_seg_##name(                   \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn10k_sso_hws_deq_tmo_seg_burst_##name(             \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn10k_sso_hws_deq_ca_seg_##name(                    \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn10k_sso_hws_deq_ca_seg_burst_##name(              \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);

NIX_RX_FASTPATH_MODES
#undef R

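/* Look up the cn10k Tx queue for an mbuf from the flat [port][queue]
 * table using the queue stamped by the Tx adapter.
 */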
static __rte_always_inline struct cn10k_eth_txq *
cn10k_sso_hws_xtract_meta(struct rte_mbuf *m,
			  const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT])
{
	return (struct cn10k_eth_txq *)
		txq_data[m->port][rte_event_eth_tx_adapter_txq_get(m)];
}

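/* Transmit a single mbuf through the LMT line: build the NIX send
 * descriptor (with TSO/CPT preparation as needed), wait for scheduler
 * head on ordered flows, then submit with STEORL.
 */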
static __rte_always_inline void
cn10k_sso_tx_one(struct rte_mbuf *m, uint64_t *cmd, uint16_t lmt_id,
		 uintptr_t lmt_addr, uint8_t sched_type, uintptr_t base,
		 const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT],
		 const uint32_t flags)
{
	uint8_t lnum = 0, loff = 0, shft = 0;
	struct cn10k_eth_txq *txq;
	uintptr_t laddr;
	uint16_t segdw;
	uintptr_t pa;
	bool sec;

	txq = cn10k_sso_hws_xtract_meta(m, txq_data);
	cn10k_nix_tx_skeleton(txq, cmd, flags);
	/* Perform header writes before barrier
	 * for TSO.
	 */
	if (flags & NIX_TX_OFFLOAD_TSO_F)
		cn10k_nix_xmit_prepare_tso(m, flags);

	cn10k_nix_xmit_prepare(m, cmd, flags, txq->lso_tun_fmt, &sec);

	laddr = lmt_addr;
	/* Prepare CPT instruction and get nixtx addr if
	 * crypto offload is enabled.
	 */
	if (flags & NIX_TX_OFFLOAD_SECURITY_F && sec)
		cn10k_nix_prep_sec(m, cmd, &laddr, lmt_addr, &lnum, &loff,
				   &shft, txq->sa_base, flags);

	/* Move NIX descriptor to LMT/NIXTX area */
	cn10k_nix_xmit_mv_lmt_base(laddr, cmd, flags);

	if (flags & NIX_TX_MULTI_SEG_F)
		segdw = cn10k_nix_prepare_mseg(m, (uint64_t *)laddr, flags);
	else
		segdw = cn10k_nix_tx_ext_subs(flags) + 2;

	if (flags & NIX_TX_OFFLOAD_SECURITY_F && sec)
		pa = txq->cpt_io_addr | 3 << 4;
	else
		pa = txq->io_addr | ((segdw - 1) << 4);

	if (!sched_type)
		roc_sso_hws_head_wait(base + SSOW_LF_GWS_TAG);

	roc_lmt_submit_steorl(lmt_id, pa);
}

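/* Transmit an event vector whose mbufs may span multiple ports or
 * queues: groups of NIX_DESCS_PER_LOOP packets that share a port and
 * queue go through the vector Tx path, the rest are sent one by one.
 */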
static __rte_always_inline void
cn10k_sso_vwqe_split_tx(struct rte_mbuf **mbufs, uint16_t nb_mbufs,
			uint64_t *cmd, uint16_t lmt_id, uintptr_t lmt_addr,
			uint8_t sched_type, uintptr_t base,
			const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT],
			const uint32_t flags)
{
	uint16_t port[4], queue[4];
	uint16_t i, j, pkts, scalar;
	struct cn10k_eth_txq *txq;

	scalar = nb_mbufs & (NIX_DESCS_PER_LOOP - 1);
	pkts = RTE_ALIGN_FLOOR(nb_mbufs, NIX_DESCS_PER_LOOP);

	for (i = 0; i < pkts; i += NIX_DESCS_PER_LOOP) {
		port[0] = mbufs[i]->port;
		port[1] = mbufs[i + 1]->port;
		port[2] = mbufs[i + 2]->port;
		port[3] = mbufs[i + 3]->port;

		queue[0] = rte_event_eth_tx_adapter_txq_get(mbufs[i]);
		queue[1] = rte_event_eth_tx_adapter_txq_get(mbufs[i + 1]);
		queue[2] = rte_event_eth_tx_adapter_txq_get(mbufs[i + 2]);
		queue[3] = rte_event_eth_tx_adapter_txq_get(mbufs[i + 3]);

		/* Fall back to scalar Tx when the four packets do not
		 * share a single port/queue pair.
		 */
		if (((port[0] ^ port[1]) & (port[2] ^ port[3])) ||
		    ((queue[0] ^ queue[1]) & (queue[2] ^ queue[3]))) {
			for (j = 0; j < 4; j++)
				cn10k_sso_tx_one(mbufs[i + j], cmd, lmt_id,
						 lmt_addr, sched_type, base,
						 txq_data, flags);
		} else {
			txq = (struct cn10k_eth_txq *)
				txq_data[port[0]][queue[0]];
			cn10k_nix_xmit_pkts_vector(txq, &mbufs[i], 4, cmd,
						   base + SSOW_LF_GWS_TAG,
						   flags | NIX_TX_VWQE_F);
		}
	}

	mbufs += i;

	for (i = 0; i < scalar; i++) {
		cn10k_sso_tx_one(mbufs[i], cmd, lmt_id, lmt_addr, sched_type,
				 base, txq_data, flags);
	}
}

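/* Tx adapter enqueue for a single event: transmits an event vector
 * (whole via the vector path, or split per port/queue) or a single
 * mbuf, then flushes the tag unless the mbuf is still referenced
 * with MBUF_NOFF enabled.
 */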
static __rte_always_inline uint16_t
cn10k_sso_hws_event_tx(struct cn10k_sso_hws *ws, struct rte_event *ev,
		       uint64_t *cmd,
		       const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT],
		       const uint32_t flags)
{
	struct cn10k_eth_txq *txq;
	struct rte_mbuf *m;
	uintptr_t lmt_addr;
	uint16_t ref_cnt;
	uint16_t lmt_id;

	lmt_addr = ws->lmt_base;
	ROC_LMT_BASE_ID_GET(lmt_addr, lmt_id);

	if (ev->event_type & RTE_EVENT_TYPE_VECTOR) {
		struct rte_mbuf **mbufs = ev->vec->mbufs;
		uint64_t meta = *(uint64_t *)ev->vec;

		/* Vector attributes valid: all mbufs share the port and
		 * queue encoded in the vector header.
		 */
		if (meta & BIT(31)) {
			txq = (struct cn10k_eth_txq *)
				txq_data[meta >> 32][meta >> 48];

			cn10k_nix_xmit_pkts_vector(
				txq, mbufs, meta & 0xFFFF, cmd,
				ws->tx_base + SSOW_LF_GWS_TAG,
				flags | NIX_TX_VWQE_F);
		} else {
			cn10k_sso_vwqe_split_tx(
				mbufs, meta & 0xFFFF, cmd, lmt_id, lmt_addr,
				ev->sched_type, ws->tx_base, txq_data, flags);
		}
		rte_mempool_put(rte_mempool_from_obj(ev->vec), ev->vec);
		return (meta & 0xFFFF);
	}

	m = ev->mbuf;
	ref_cnt = m->refcnt;
	cn10k_sso_tx_one(m, cmd, lmt_id, lmt_addr, ev->sched_type, ws->tx_base,
			 txq_data, flags);

	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
		/* The mbuf is still referenced elsewhere; skip the tag
		 * flush so the event is not released prematurely.
		 */
		if (ref_cnt > 1)
			return 1;
	}

	cnxk_sso_hws_swtag_flush(ws->tx_base + SSOW_LF_GWS_TAG,
				 ws->tx_base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
	return 1;
}

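/* Tx fastpath enqueue declarations, one set per offload combination in
 * NIX_TX_FASTPATH_MODES, in plain and segmented (seg) as well as
 * single- and dual-workslot flavours. For an illustrative mode name
 * foo, T(foo, ...) declares cn10k_sso_hws_tx_adptr_enq_foo() and the
 * three sibling prototypes.
 */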
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
	uint16_t __rte_hot cn10k_sso_hws_tx_adptr_enq_##name(                  \
		void *port, struct rte_event ev[], uint16_t nb_events);        \
	uint16_t __rte_hot cn10k_sso_hws_tx_adptr_enq_seg_##name(              \
		void *port, struct rte_event ev[], uint16_t nb_events);        \
	uint16_t __rte_hot cn10k_sso_hws_dual_tx_adptr_enq_##name(             \
		void *port, struct rte_event ev[], uint16_t nb_events);        \
	uint16_t __rte_hot cn10k_sso_hws_dual_tx_adptr_enq_seg_##name(         \
		void *port, struct rte_event ev[], uint16_t nb_events);

NIX_TX_FASTPATH_MODES
#undef T

#endif