/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#ifndef __OTX2_WORKER_H__
#define __OTX2_WORKER_H__

#include <rte_common.h>
#include <rte_branch_prediction.h>

#include <otx2_common.h>
#include "otx2_evdev.h"
#include "otx2_evdev_crypto_adptr_rx.h"
#include "otx2_ethdev_sec_tx.h"

/* SSO Operations */
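
/* Fetch work from the workslot and translate it into an rte_event.
 * Returns 1 if valid work was received, 0 otherwise.
 */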
static __rte_always_inline uint16_t
otx2_ssogws_get_work(struct otx2_ssogws *ws, struct rte_event *ev,
		     const uint32_t flags, const void * const lookup_mem)
{
	union otx2_sso_event event;
	uint64_t tstamp_ptr;
	uint64_t get_work1;
	uint64_t mbuf;

	otx2_write64(BIT_ULL(16) | /* wait */
		     1, /* grouped */
		     ws->getwrk_op);

	if (flags & NIX_RX_OFFLOAD_PTYPE_F)
		rte_prefetch_non_temporal(lookup_mem);
#ifdef RTE_ARCH_ARM64
	/* Poll the tag register until the get_work completes (bit 63
	 * clears), then prefetch the WQE and the mbuf preceding it.
	 */
	asm volatile(
		     "		ldr %[tag], [%[tag_loc]]	\n"
		     "		ldr %[wqp], [%[wqp_loc]]	\n"
		     "		tbz %[tag], 63, done%=		\n"
		     "		sevl				\n"
		     "rty%=:	wfe				\n"
		     "		ldr %[tag], [%[tag_loc]]	\n"
		     "		ldr %[wqp], [%[wqp_loc]]	\n"
		     "		tbnz %[tag], 63, rty%=		\n"
		     "done%=:	dmb ld				\n"
		     "		prfm pldl1keep, [%[wqp], #8]	\n"
		     "		sub %[mbuf], %[wqp], #0x80	\n"
		     "		prfm pldl1keep, [%[mbuf]]	\n"
		     : [tag] "=&r" (event.get_work0),
		       [wqp] "=&r" (get_work1),
		       [mbuf] "=&r" (mbuf)
		     : [tag_loc] "r" (ws->tag_op),
		       [wqp_loc] "r" (ws->wqp_op)
		     );
#else
	/* Wait for the get_work operation to complete */
	event.get_work0 = otx2_read64(ws->tag_op);
	while ((BIT_ULL(63)) & event.get_work0)
		event.get_work0 = otx2_read64(ws->tag_op);

	get_work1 = otx2_read64(ws->wqp_op);
	rte_prefetch0((const void *)get_work1);
	mbuf = (uint64_t)((char *)get_work1 - sizeof(struct rte_mbuf));
	rte_prefetch0((const void *)mbuf);
#endif

	/* Rearrange the tag type and group fields of the tag word into
	 * the rte_event layout.
	 */
	event.get_work0 = (event.get_work0 & (0x3ull << 32)) << 6 |
			  (event.get_work0 & (0x3FFull << 36)) << 4 |
			  (event.get_work0 & 0xffffffff);

	if (event.sched_type != SSO_TT_EMPTY) {
		if ((flags & NIX_RX_OFFLOAD_SECURITY_F) &&
		    (event.event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
			get_work1 = otx2_handle_crypto_event(get_work1);
		} else if (event.event_type == RTE_EVENT_TYPE_ETHDEV) {
			otx2_wqe_to_mbuf(get_work1, mbuf, event.sub_event_type,
					 (uint32_t) event.get_work0, flags,
					 lookup_mem);
			/* Extract the timestamp if PTP is enabled */
			tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)
						    get_work1) +
						   OTX2_SSO_WQE_SG_PTR);
			otx2_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf,
						ws->tstamp, flags,
						(uint64_t *)tstamp_ptr);
			get_work1 = mbuf;
		}
	}

	ev->event = event.get_work0;
	ev->u64 = get_work1;

	return !!get_work1;
}

/* Used for cleaning up the workslot. */
static __rte_always_inline uint16_t
otx2_ssogws_get_work_empty(struct otx2_ssogws *ws, struct rte_event *ev,
			   const uint32_t flags)
{
	union otx2_sso_event event;
	uint64_t tstamp_ptr;
	uint64_t get_work1;
	uint64_t mbuf;

#ifdef RTE_ARCH_ARM64
	asm volatile(
		     "		ldr %[tag], [%[tag_loc]]	\n"
		     "		ldr %[wqp], [%[wqp_loc]]	\n"
		     "		tbz %[tag], 63, done%=		\n"
		     "		sevl				\n"
		     "rty%=:	wfe				\n"
		     "		ldr %[tag], [%[tag_loc]]	\n"
		     "		ldr %[wqp], [%[wqp_loc]]	\n"
		     "		tbnz %[tag], 63, rty%=		\n"
		     "done%=:	dmb ld				\n"
		     "		prfm pldl1keep, [%[wqp], #8]	\n"
		     "		sub %[mbuf], %[wqp], #0x80	\n"
		     "		prfm pldl1keep, [%[mbuf]]	\n"
		     : [tag] "=&r" (event.get_work0),
		       [wqp] "=&r" (get_work1),
		       [mbuf] "=&r" (mbuf)
		     : [tag_loc] "r" (ws->tag_op),
		       [wqp_loc] "r" (ws->wqp_op)
		     );
#else
	event.get_work0 = otx2_read64(ws->tag_op);
	while ((BIT_ULL(63)) & event.get_work0)
		event.get_work0 = otx2_read64(ws->tag_op);

	get_work1 = otx2_read64(ws->wqp_op);
	rte_prefetch_non_temporal((const void *)get_work1);
	mbuf = (uint64_t)((char *)get_work1 - sizeof(struct rte_mbuf));
	rte_prefetch_non_temporal((const void *)mbuf);
#endif

	event.get_work0 = (event.get_work0 & (0x3ull << 32)) << 6 |
			  (event.get_work0 & (0x3FFull << 36)) << 4 |
			  (event.get_work0 & 0xffffffff);

	if (event.sched_type != SSO_TT_EMPTY &&
	    event.event_type == RTE_EVENT_TYPE_ETHDEV) {
		otx2_wqe_to_mbuf(get_work1, mbuf, event.sub_event_type,
				 (uint32_t) event.get_work0, flags, NULL);
		/* Extract the timestamp if PTP is enabled */
		tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)get_work1)
					   + OTX2_SSO_WQE_SG_PTR);
		otx2_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, ws->tstamp,
					flags, (uint64_t *)tstamp_ptr);
		get_work1 = mbuf;
	}

	ev->event = event.get_work0;
	ev->u64 = get_work1;

	return !!get_work1;
}

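/* Post a new event (tag, tag type and work pointer) to the given SSO group. */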
static __rte_always_inline void
otx2_ssogws_add_work(struct otx2_ssogws *ws, const uint64_t event_ptr,
		     const uint32_t tag, const uint8_t new_tt,
		     const uint16_t grp)
{
	uint64_t add_work0;

	add_work0 = tag | ((uint64_t)(new_tt) << 32);
	otx2_store_pair(add_work0, event_ptr, ws->grps_base[grp]);
}

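/* Switch the tag and tag type, then deschedule the work to group grp. */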
static __rte_always_inline void
otx2_ssogws_swtag_desched(struct otx2_ssogws *ws, uint32_t tag, uint8_t new_tt,
			  uint16_t grp)
{
	uint64_t val;

	val = tag | ((uint64_t)(new_tt & 0x3) << 32) | ((uint64_t)grp << 34);
	otx2_write64(val, ws->swtag_desched_op);
}

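/* Normal tag switch: update the tag and tag type of the current work. */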
static __rte_always_inline void
otx2_ssogws_swtag_norm(struct otx2_ssogws *ws, uint32_t tag, uint8_t new_tt)
{
	uint64_t val;

	val = tag | ((uint64_t)(new_tt & 0x3) << 32);
	otx2_write64(val, ws->swtag_norm_op);
}

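/* Switch the current tag type to EMPTY, releasing the held tag. */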
static __rte_always_inline void
otx2_ssogws_swtag_untag(struct otx2_ssogws *ws)
{
	otx2_write64(0, OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
		     SSOW_LF_GWS_OP_SWTAG_UNTAG);
}

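/* Flush the current tag; skipped when the tag type is already EMPTY. */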
static __rte_always_inline void
otx2_ssogws_swtag_flush(uint64_t tag_op, uint64_t flush_op)
{
	if (OTX2_SSOW_TT_FROM_TAG(otx2_read64(tag_op)) == SSO_TT_EMPTY)
		return;
	otx2_write64(0, flush_op);
}

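/* Deschedule the work currently held by the workslot. */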
static __rte_always_inline void
otx2_ssogws_desched(struct otx2_ssogws *ws)
{
	otx2_write64(0, OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
		     SSOW_LF_GWS_OP_DESCHED);
}

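/* Wait until a pending tag switch on the workslot completes. */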
static __rte_always_inline void
otx2_ssogws_swtag_wait(struct otx2_ssogws *ws)
{
#ifdef RTE_ARCH_ARM64
	uint64_t swtp;

	asm volatile("		ldr %[swtb], [%[swtp_loc]]	\n"
		     "		tbz %[swtb], 62, done%=		\n"
		     "		sevl				\n"
		     "rty%=:	wfe				\n"
		     "		ldr %[swtb], [%[swtp_loc]]	\n"
		     "		tbnz %[swtb], 62, rty%=		\n"
		     "done%=:					\n"
		     : [swtb] "=&r" (swtp)
		     : [swtp_loc] "r" (ws->tag_op));
#else
	/* Wait for the SWTAG/SWTAG_FULL operation to complete */
	while (otx2_read64(ws->tag_op) & BIT_ULL(62))
		;
#endif
}

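/* Spin until the workslot reaches the head of its tag ordering chain. */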
static __rte_always_inline void
otx2_ssogws_head_wait(uint64_t tag_op)
{
#ifdef RTE_ARCH_ARM64
	uint64_t tag;

	asm volatile (
		      "		ldr %[tag], [%[tag_op]]		\n"
		      "		tbnz %[tag], 35, done%=		\n"
		      "		sevl				\n"
		      "rty%=:	wfe				\n"
		      "		ldr %[tag], [%[tag_op]]		\n"
		      "		tbz %[tag], 35, rty%=		\n"
		      "done%=:					\n"
		      : [tag] "=&r" (tag)
		      : [tag_op] "r" (tag_op)
		      );
#else
	/* Wait for the HEAD bit (bit 35) to be set */
	while (!(otx2_read64(tag_op) & BIT_ULL(35)))
		;
#endif
}

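/* Look up the destination Tx queue from the mbuf's port and the Tx
 * adapter queue id stored in the mbuf.
 */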
static __rte_always_inline const struct otx2_eth_txq *
otx2_ssogws_xtract_meta(struct rte_mbuf *m,
			const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT])
{
	return (const struct otx2_eth_txq *)txq_data[m->port][
					rte_event_eth_tx_adapter_txq_get(m)];
}

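/* Copy the Tx queue's command template and fill the send descriptor
 * for the given mbuf.
 */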
static __rte_always_inline void
otx2_ssogws_prepare_pkt(const struct otx2_eth_txq *txq, struct rte_mbuf *m,
			uint64_t *cmd, const uint32_t flags)
{
	otx2_lmt_mov(cmd, txq->cmd, otx2_nix_tx_ext_subs(flags));
	otx2_nix_xmit_prepare(m, cmd, flags, txq->lso_tun_fmt);
}

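/* Tx adapter enqueue: transmit the event's mbuf on its Tx queue. */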
static __rte_always_inline uint16_t
otx2_ssogws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd,
		     const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT],
		     const uint32_t flags)
{
	struct rte_mbuf *m = ev->mbuf;
	const struct otx2_eth_txq *txq;
	uint16_t ref_cnt = m->refcnt;

	if ((flags & NIX_TX_OFFLOAD_SECURITY_F) &&
	    (m->ol_flags & PKT_TX_SEC_OFFLOAD)) {
		txq = otx2_ssogws_xtract_meta(m, txq_data);
		return otx2_sec_event_tx(base, ev, m, txq, flags);
	}

	/* Perform header writes before barrier for TSO */
	otx2_nix_xmit_prepare_tso(m, flags);
	/* Commit any changes to the packet here when fast free is enabled,
	 * as no further changes will be made to the mbuf. When fast free is
	 * not enabled, otx2_nix_prepare_mseg() and otx2_nix_xmit_prepare()
	 * issue a barrier after the refcnt update.
	 */
	if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
		rte_io_wmb();
	txq = otx2_ssogws_xtract_meta(m, txq_data);
	otx2_ssogws_prepare_pkt(txq, m, cmd, flags);

	if (flags & NIX_TX_MULTI_SEG_F) {
		const uint16_t segdw = otx2_nix_prepare_mseg(m, cmd, flags);
		otx2_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0],
					     m->ol_flags, segdw, flags);
		if (!ev->sched_type) {
			otx2_nix_xmit_mseg_prep_lmt(cmd, txq->lmt_addr, segdw);
			otx2_ssogws_head_wait(base + SSOW_LF_GWS_TAG);
			if (otx2_nix_xmit_submit_lmt(txq->io_addr) == 0)
				otx2_nix_xmit_mseg_one(cmd, txq->lmt_addr,
						       txq->io_addr, segdw);
		} else {
			otx2_nix_xmit_mseg_one(cmd, txq->lmt_addr,
					       txq->io_addr, segdw);
		}
	} else {
		/* Passing no of segdw as 4: HDR + EXT + SG + SMEM */
		otx2_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0],
					     m->ol_flags, 4, flags);

		if (!ev->sched_type) {
			otx2_nix_xmit_prep_lmt(cmd, txq->lmt_addr, flags);
			otx2_ssogws_head_wait(base + SSOW_LF_GWS_TAG);
			if (otx2_nix_xmit_submit_lmt(txq->io_addr) == 0)
				otx2_nix_xmit_one(cmd, txq->lmt_addr,
						  txq->io_addr, flags);
		} else {
			otx2_nix_xmit_one(cmd, txq->lmt_addr, txq->io_addr,
					  flags);
		}
	}

	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
		if (ref_cnt > 1)
			return 1;
	}

	otx2_ssogws_swtag_flush(base + SSOW_LF_GWS_TAG,
				base + SSOW_LF_GWS_OP_SWTAG_FLUSH);

	return 1;
}

#endif