1
2
3
4#ifndef __CNXK_ETHDEV_H__
5#define __CNXK_ETHDEV_H__
6
7#include <math.h>
8#include <stdint.h>
9
10#include <ethdev_driver.h>
11#include <ethdev_pci.h>
12#include <rte_kvargs.h>
13#include <rte_mbuf.h>
14#include <rte_mbuf_pool_ops.h>
15#include <rte_mempool.h>
16#include <rte_mtr_driver.h>
17#include <rte_security.h>
18#include <rte_security_driver.h>
19#include <rte_tailq.h>
20#include <rte_time.h>
21#include <rte_tm_driver.h>
22
23#include "roc_api.h"
24
25#define CNXK_ETH_DEV_PMD_VERSION "1.0"
26
27
28#define CNXK_LINK_CFG_IN_PROGRESS_F BIT_ULL(0)
29
30
31
32
33#define CNXK_NIX_MAX_VTAG_INS 2
34#define CNXK_NIX_MAX_VTAG_ACT_SIZE (4 * CNXK_NIX_MAX_VTAG_INS)
35
36
37#define CNXK_NIX_L2_OVERHEAD (RTE_ETHER_HDR_LEN + \
38 RTE_ETHER_CRC_LEN + \
39 CNXK_NIX_MAX_VTAG_ACT_SIZE)
40
41#define CNXK_NIX_RX_MIN_DESC 16
42#define CNXK_NIX_RX_MIN_DESC_ALIGN 16
43#define CNXK_NIX_RX_NB_SEG_MAX 6
44#define CNXK_NIX_RX_DEFAULT_RING_SZ 4096
45
46#define CNXK_NIX_TX_MAX_SQB 512
47
48
49
50
51#if defined(RTE_LIBRTE_IEEE1588)
52#define CNXK_NIX_TX_NB_SEG_MAX 7
53#else
54#define CNXK_NIX_TX_NB_SEG_MAX 9
55#endif
56
57#define CNXK_NIX_TX_MSEG_SG_DWORDS \
58 ((RTE_ALIGN_MUL_CEIL(CNXK_NIX_TX_NB_SEG_MAX, 3) / 3) + \
59 CNXK_NIX_TX_NB_SEG_MAX)
60
61#define CNXK_NIX_RSS_L3_L4_SRC_DST \
62 (RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY | \
63 RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY)
64
65#define CNXK_NIX_RSS_OFFLOAD \
66 (RTE_ETH_RSS_PORT | RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | \
67 RTE_ETH_RSS_TCP | RTE_ETH_RSS_SCTP | RTE_ETH_RSS_TUNNEL | \
68 RTE_ETH_RSS_L2_PAYLOAD | CNXK_NIX_RSS_L3_L4_SRC_DST | \
69 RTE_ETH_RSS_LEVEL_MASK | RTE_ETH_RSS_C_VLAN)
70
71#define CNXK_NIX_TX_OFFLOAD_CAPA \
72 (RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | RTE_ETH_TX_OFFLOAD_MT_LOCKFREE | \
73 RTE_ETH_TX_OFFLOAD_VLAN_INSERT | RTE_ETH_TX_OFFLOAD_QINQ_INSERT | \
74 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM | \
75 RTE_ETH_TX_OFFLOAD_TCP_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
76 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_TSO | \
77 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | \
78 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
79 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_SECURITY)
80
81#define CNXK_NIX_RX_OFFLOAD_CAPA \
82 (RTE_ETH_RX_OFFLOAD_CHECKSUM | RTE_ETH_RX_OFFLOAD_SCTP_CKSUM | \
83 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | RTE_ETH_RX_OFFLOAD_SCATTER | \
84 RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_RSS_HASH | \
85 RTE_ETH_RX_OFFLOAD_TIMESTAMP | RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
86 RTE_ETH_RX_OFFLOAD_SECURITY)
87
88#define RSS_IPV4_ENABLE \
89 (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
90 RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
91 RTE_ETH_RSS_NONFRAG_IPV4_SCTP)
92
93#define RSS_IPV6_ENABLE \
94 (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | \
95 RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
96 RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
97
98#define RSS_IPV6_EX_ENABLE \
99 (RTE_ETH_RSS_IPV6_EX | RTE_ETH_RSS_IPV6_TCP_EX | RTE_ETH_RSS_IPV6_UDP_EX)
100
101#define RSS_MAX_LEVELS 3
102
103#define RSS_IPV4_INDEX 0
104#define RSS_IPV6_INDEX 1
105#define RSS_TCP_INDEX 2
106#define RSS_UDP_INDEX 3
107#define RSS_SCTP_INDEX 4
108#define RSS_DMAC_INDEX 5
109
110
111#define CNXK_FLOW_ACTION_FLAG_DEFAULT 0xffff
112
113
114#define CNXK_CYCLECOUNTER_MASK 0xffffffffffffffffULL
115#define CNXK_NIX_TIMESYNC_RX_OFFSET 8
116
117#define PTYPE_NON_TUNNEL_WIDTH 16
118#define PTYPE_TUNNEL_WIDTH 12
119#define PTYPE_NON_TUNNEL_ARRAY_SZ BIT(PTYPE_NON_TUNNEL_WIDTH)
120#define PTYPE_TUNNEL_ARRAY_SZ BIT(PTYPE_TUNNEL_WIDTH)
121#define PTYPE_ARRAY_SZ \
122 ((PTYPE_NON_TUNNEL_ARRAY_SZ + PTYPE_TUNNEL_ARRAY_SZ) * sizeof(uint16_t))
123
124
125#define ERRCODE_ERRLEN_WIDTH 12
126#define ERR_ARRAY_SZ ((BIT(ERRCODE_ERRLEN_WIDTH)) * sizeof(uint32_t))
127
128
129#define CNXK_NIX_FASTPATH_LOOKUP_MEM "cnxk_nix_fastpath_lookup_mem"
130
131#define CNXK_NIX_UDP_TUN_BITMASK \
132 ((1ull << (RTE_MBUF_F_TX_TUNNEL_VXLAN >> 45)) | \
133 (1ull << (RTE_MBUF_F_TX_TUNNEL_GENEVE >> 45)))
134
135
136#define CNXK_ETHDEV_SEC_OUTB_EV_SUB 0xFFUL
137
138
139#define CNXK_ETHDEV_SPI_TAG_MASK 0xFFFFFUL
140
141#define CNXK_NIX_PFC_CHAN_COUNT 16
142
143#define CNXK_TM_MARK_VLAN_DEI BIT_ULL(0)
144#define CNXK_TM_MARK_IP_DSCP BIT_ULL(1)
145#define CNXK_TM_MARK_IP_ECN BIT_ULL(2)
146
147#define CNXK_TM_MARK_MASK \
148 (CNXK_TM_MARK_VLAN_DEI | CNXK_TM_MARK_IP_DSCP | CNXK_TM_MARK_IP_ECN)
149
150#define CNXK_TX_MARK_FMT_MASK (0xFFFFFFFFFFFFull)
151
/* Link-level (802.3x) flow control configuration. */
struct cnxk_fc_cfg {
	enum rte_eth_fc_mode mode; /* NONE / RX / TX / FULL */
	uint8_t rx_pause;          /* Rx pause enabled */
	uint8_t tx_pause;          /* Tx pause enabled */
};
157
/* Priority flow control (802.1Qbb) configuration.
 * NOTE(review): the *_en fields appear to be per-traffic-class bitmasks
 * (CNXK_NIX_PFC_CHAN_COUNT = 16 classes) — confirm against users.
 */
struct cnxk_pfc_cfg {
	uint16_t class_en;    /* Traffic classes with PFC enabled */
	uint16_t pause_time;  /* Pause quanta advertised in PFC frames */
	uint16_t rx_pause_en; /* Rx pause enable per class */
	uint16_t tx_pause_en; /* Tx pause enable per class */
};
164
/* Saved Rx/Tx queue configuration, used to re-create queues across
 * device reconfigure/restart (see tx_qconf/rx_qconf in cnxk_eth_dev).
 */
struct cnxk_eth_qconf {
	union {
		struct rte_eth_txconf tx; /* Tx queue config */
		struct rte_eth_rxconf rx; /* Rx queue config */
	} conf;
	struct rte_mempool *mp; /* Rx mbuf pool (Rx queues only) */
	uint16_t nb_desc;       /* Descriptor count */
	uint8_t valid;          /* Non-zero when this entry holds a saved config */
};
174
/* PTP timestamping state shared with the fast path. */
struct cnxk_timesync_info {
	uint8_t rx_ready;           /* An Rx timestamp is pending */
	uint64_t rx_tstamp;         /* Latest Rx timestamp value */
	uint64_t rx_tstamp_dynflag; /* Mbuf dynflag mask marking Rx timestamps */
	int tstamp_dynfield_offset; /* Mbuf dynfield offset for the timestamp */
	rte_iova_t tx_tstamp_iova;  /* IOVA of tx_tstamp — presumably written by HW */
	uint64_t *tx_tstamp;        /* Tx timestamp location */
} __plt_cache_aligned;
183
/* Ingress meter (policer) node; meters may be chained across levels. */
struct cnxk_meter_node {
#define MAX_PRV_MTR_NODES 10
	TAILQ_ENTRY(cnxk_meter_node) next;
	/* Meter id given by the application */
	uint32_t id;
	/* Profile and policy attached to this meter */
	struct cnxk_mtr_profile_node *profile;
	struct cnxk_mtr_policy_node *policy;
	/* HW bandwidth-profile id */
	uint32_t bpf_id;
	/* Rx queues attached to this meter */
	uint32_t rq_num;
	uint32_t *rq_id;
	/* Level of this node in the meter chain */
	uint16_t level;
	/* Upstream (previous) meter ids, up to MAX_PRV_MTR_NODES */
	uint32_t prev_id[MAX_PRV_MTR_NODES];
	uint32_t prev_cnt;
	/* Next meter id in the chain */
	uint32_t next_id;
	bool is_prev; /* NOTE(review): looks like chain-membership flags — confirm */
	bool is_next;
	/* Meter parameters as given by the application */
	struct rte_mtr_params params;
	/* HW bandwidth-profile objects backing this meter */
	struct roc_nix_bpf_objs profs;
	bool is_used;     /* Node is in use — TODO confirm exact semantics */
	uint32_t ref_cnt; /* Reference count */
};
205
/* RSS action parameters captured from a meter-policy RSS fate action. */
struct action_rss {
	enum rte_eth_hash_function func; /* Hash function */
	uint32_t level;                  /* Tunnel level to hash on */
	uint64_t types;                  /* RTE_ETH_RSS_* type mask */
	uint32_t key_len;                /* Length of 'key' in bytes */
	uint32_t queue_num;              /* Number of entries in 'queue' */
	uint8_t *key;                    /* Hash key */
	uint16_t *queue;                 /* Destination queue indices */
};
215
/* Per-color fate action of a meter policy. */
struct policy_actions {
	uint32_t action_fate; /* Selects which union member is valid */
	union {
		uint16_t queue;              /* Fate: deliver to queue */
		uint32_t mtr_id;             /* Fate: chain to next meter */
		struct action_rss *rss_desc; /* Fate: RSS spread */
	};
};
224
/* Meter policy: per-color actions, reference counted. */
struct cnxk_mtr_policy_node {
	TAILQ_ENTRY(cnxk_mtr_policy_node) next;
	/* Policy id */
	uint32_t id;
	/* Meter id bound to this policy — TODO confirm against callers */
	uint32_t mtr_id;
	/* Policy parameters as given by the application */
	struct rte_mtr_meter_policy_params policy;
	/* Fate action per color */
	struct policy_actions actions[RTE_COLORS];
	/* Number of meters referencing this policy */
	uint32_t ref_cnt;
};
234
/* Meter profile, reference counted. */
struct cnxk_mtr_profile_node {
	TAILQ_ENTRY(cnxk_mtr_profile_node) next;
	struct rte_mtr_meter_profile profile; /* Profile parameters */
	uint32_t ref_cnt;                     /* Meters using this profile */
	uint32_t id;                          /* Profile id */
};
241
242TAILQ_HEAD(cnxk_mtr_profiles, cnxk_mtr_profile_node);
243TAILQ_HEAD(cnxk_mtr_policy, cnxk_mtr_policy_node);
244TAILQ_HEAD(cnxk_mtr, cnxk_meter_node);
245
246
/* Inline IPsec security session, kept on the per-port inb/outb lists. */
struct cnxk_eth_sec_sess {
	/* List entry */
	TAILQ_ENTRY(cnxk_eth_sec_sess) entry;

	/* Pointer to the HW security association (SA) */
	void *sa;

	/* Index of the SA in the SA table */
	uint32_t sa_idx;

	/* IPsec SPI */
	uint32_t spi;

	/* Back pointer to the rte_security session */
	struct rte_security_session *sess;

	/* True for an inbound (decrypt) session */
	bool inb;

	/* Session uses the inline device — TODO confirm */
	bool inl_dev;
};
271
272TAILQ_HEAD(cnxk_eth_sec_sess_list, cnxk_eth_sec_sess);
273
274
/* Per-port inbound (Rx) inline IPsec state. */
struct cnxk_eth_dev_sec_inb {
	/* Maximum SPI supported */
	uint16_t max_spi;

	/* Inbound processing uses the inline device */
	bool inl_dev;

	/* Inline device usage explicitly disabled — TODO confirm (devargs?) */
	bool no_inl_dev;

	/* Number of active inbound sessions */
	uint16_t nb_sess;

	/* List of inbound sessions */
	struct cnxk_eth_sec_sess_list list;

	/* Scratch SA buffer for setup/teardown — TODO confirm */
	void *sa_dptr;

	/* Serializes session add/delete */
	rte_spinlock_t lock;
};
297
298
/* Per-port outbound (Tx) inline IPsec state. */
struct cnxk_eth_dev_sec_outb {
	/* Maximum number of outbound SAs */
	uint16_t max_sa;

	/* Crypto queue descriptor count */
	uint32_t nb_desc;

	/* Bitmap tracking allocated SA indices */
	struct plt_bitmap *sa_bmap;

	/* Memory backing sa_bmap */
	void *sa_bmap_mem;

	/* Base address of the outbound SA table */
	uint64_t sa_base;

	/* CPT LF base used for outbound crypto */
	struct roc_cpt_lf *lf_base;

	/* Number of crypto queues */
	uint16_t nb_crypto_qs;

	/* Software flow-control counter memory — TODO confirm semantics */
	uint64_t *fc_sw_mem;

	/* Number of active outbound sessions */
	uint16_t nb_sess;

	/* List of outbound sessions */
	struct cnxk_eth_sec_sess_list list;

	/* Scratch SA buffer for setup/teardown — TODO confirm */
	void *sa_dptr;

	/* Serializes session add/delete */
	rte_spinlock_t lock;
};
336
/* Per-port driver private data, stored in the ethdev private area
 * (see cnxk_eth_pmd_priv()).
 */
struct cnxk_eth_dev {
	/* ROC NIX handle */
	struct roc_nix nix;

	/* ROC NPC (flow classification) handle */
	struct roc_npc npc;

	/* ROC RQ, SQ and CQ arrays, sized by the configured queue counts */
	struct roc_nix_rq *rqs;
	struct roc_nix_sq *sqs;
	struct roc_nix_cq *cqs;

	/* Configured queue counts */
	uint16_t nb_rxq;
	uint16_t nb_txq;
	uint16_t nb_rxq_sso; /* Rx queues owned by SSO/eventdev — TODO confirm */
	uint8_t configured;  /* Set once dev_configure has completed */

	/* DMAC filter state */
	uint8_t dmac_filter_count; /* Filters currently programmed */
	uint8_t max_mac_entries;   /* Max MAC entries supported */
	bool dmac_filter_enable;

	uint16_t flags;        /* CNXK_LINK_CFG_IN_PROGRESS_F etc. */
	uint8_t ptype_disable; /* Packet-type parsing disabled */
	bool scalar_ena;       /* Force scalar (non-vector) datapath */
	bool tx_mark;          /* TM packet marking enabled */
	bool ptp_en;           /* PTP timestamping enabled */
	bool rx_mark_update;   /* Update mark field in Rx mbufs */

	/* Back pointer to the ethdev */
	struct rte_eth_dev *eth_dev;

	/* HW capability/errata flags packed into one word */
	union {
		struct {
			uint64_t cq_min_4k : 1;
			uint64_t ipsecd_drop_re_dis : 1;
			uint64_t vec_drop_re_dis : 1;
		};
		uint64_t hwcap;
	};

	/* Capabilities reported through dev_info */
	uint64_t rx_offload_capa;
	uint64_t tx_offload_capa;
	uint32_t speed_capa;
	/* Offloads currently enabled by the application */
	uint64_t rx_offloads;
	uint64_t tx_offloads;
	/* Compressed fast-path offload flags */
	uint16_t rx_offload_flags;
	uint16_t tx_offload_flags;

	/* RSS hash types requested via the ethdev API */
	uint64_t ethdev_rss_hf;

	/* Saved per-queue configs, restored across reconfigure */
	struct cnxk_eth_qconf *tx_qconf;
	struct cnxk_eth_qconf *rx_qconf;

	/* Flow control state */
	struct cnxk_pfc_cfg pfc_cfg;
	struct cnxk_fc_cfg fc_cfg;

	/* PTP / timesync state */
	struct cnxk_timesync_info tstamp;
	struct rte_timecounter systime_tc;
	struct rte_timecounter rx_tstamp_tc;
	struct rte_timecounter tx_tstamp_tc;
	double clk_freq_mult; /* See cnxk_nix_tsc_convert() */
	uint64_t clk_delta;

	/* Ingress meter (policer) state */
	enum roc_nix_bpf_color precolor_tbl[ROC_NIX_BPF_PRE_COLOR_MAX];
	struct cnxk_mtr_profiles mtr_profiles;
	struct cnxk_mtr_policy mtr_policy;
	struct cnxk_mtr mtr;

	/* Rx burst without any offload handling — TODO confirm usage */
	eth_rx_burst_t rx_pkt_burst_no_offload;

	/* Default MAC address */
	uint8_t mac_addr[RTE_ETHER_ADDR_LEN];

	/* LSO tunnel format indices packed into one word */
	uint64_t lso_tun_fmt;

	/* Per-queue statistics counter mapping */
	uint32_t txq_stat_map[RTE_ETHDEV_QUEUE_STAT_CNTRS];
	uint32_t rxq_stat_map[RTE_ETHDEV_QUEUE_STAT_CNTRS];

	/* Inline IPsec state */
	struct cnxk_eth_dev_sec_inb inb;
	struct cnxk_eth_dev_sec_outb outb;

	/* IP reassembly mbuf dynfield/dynflag offsets */
	int reass_dynfield_off;
	int reass_dynflag_bit;
};
437
/* Rx queue slow-path context, laid out immediately before the fast-path
 * queue structure in memory (recovered via cnxk_eth_rxq_to_sp()).
 */
struct cnxk_eth_rxq_sp {
	struct cnxk_eth_dev *dev;    /* Back pointer to the port */
	struct cnxk_eth_qconf qconf; /* Saved queue configuration */
	uint16_t qid;                /* Queue index */
	uint8_t tx_pause;            /* PFC pause state — TODO confirm semantics */
	uint8_t tc;                  /* Traffic class (PFC) */
} __plt_cache_aligned;
445
/* Tx queue slow-path context, laid out immediately before the fast-path
 * queue structure in memory (recovered via cnxk_eth_txq_to_sp()).
 */
struct cnxk_eth_txq_sp {
	struct cnxk_eth_dev *dev;    /* Back pointer to the port */
	struct cnxk_eth_qconf qconf; /* Saved queue configuration */
	uint16_t qid;                /* Queue index */
} __plt_cache_aligned;
451
452static inline struct cnxk_eth_dev *
453cnxk_eth_pmd_priv(const struct rte_eth_dev *eth_dev)
454{
455 return eth_dev->data->dev_private;
456}
457
458static inline struct cnxk_eth_rxq_sp *
459cnxk_eth_rxq_to_sp(void *__rxq)
460{
461 return ((struct cnxk_eth_rxq_sp *)__rxq) - 1;
462}
463
464static inline struct cnxk_eth_txq_sp *
465cnxk_eth_txq_to_sp(void *__txq)
466{
467 return ((struct cnxk_eth_txq_sp *)__txq) - 1;
468}
469
470
471extern struct eth_dev_ops cnxk_eth_dev_ops;
472
473
474extern struct rte_flow_ops cnxk_flow_ops;
475
476
477extern struct rte_security_ops cnxk_eth_sec_ops;
478
479
480extern struct rte_tm_ops cnxk_tm_ops;
481
482
483int cnxk_nix_probe(struct rte_pci_driver *pci_drv,
484 struct rte_pci_device *pci_dev);
485int cnxk_nix_remove(struct rte_pci_device *pci_dev);
486int cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu);
487int cnxk_nix_mc_addr_list_configure(struct rte_eth_dev *eth_dev,
488 struct rte_ether_addr *mc_addr_set,
489 uint32_t nb_mc_addr);
490int cnxk_nix_mac_addr_add(struct rte_eth_dev *eth_dev,
491 struct rte_ether_addr *addr, uint32_t index,
492 uint32_t pool);
493void cnxk_nix_mac_addr_del(struct rte_eth_dev *eth_dev, uint32_t index);
494int cnxk_nix_mac_addr_set(struct rte_eth_dev *eth_dev,
495 struct rte_ether_addr *addr);
496int cnxk_nix_promisc_enable(struct rte_eth_dev *eth_dev);
497int cnxk_nix_promisc_disable(struct rte_eth_dev *eth_dev);
498int cnxk_nix_allmulticast_enable(struct rte_eth_dev *eth_dev);
499int cnxk_nix_allmulticast_disable(struct rte_eth_dev *eth_dev);
500int cnxk_nix_info_get(struct rte_eth_dev *eth_dev,
501 struct rte_eth_dev_info *dev_info);
502int cnxk_nix_rx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
503 struct rte_eth_burst_mode *mode);
504int cnxk_nix_tx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
505 struct rte_eth_burst_mode *mode);
506int cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
507 struct rte_eth_fc_conf *fc_conf);
508int cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
509 struct rte_eth_fc_conf *fc_conf);
510int cnxk_nix_priority_flow_ctrl_queue_config(struct rte_eth_dev *eth_dev,
511 struct rte_eth_pfc_queue_conf *pfc_conf);
512int cnxk_nix_priority_flow_ctrl_queue_info_get(struct rte_eth_dev *eth_dev,
513 struct rte_eth_pfc_queue_info *pfc_info);
514int cnxk_nix_set_link_up(struct rte_eth_dev *eth_dev);
515int cnxk_nix_set_link_down(struct rte_eth_dev *eth_dev);
516int cnxk_nix_get_module_info(struct rte_eth_dev *eth_dev,
517 struct rte_eth_dev_module_info *modinfo);
518int cnxk_nix_get_module_eeprom(struct rte_eth_dev *eth_dev,
519 struct rte_dev_eeprom_info *info);
520int cnxk_nix_rx_queue_intr_enable(struct rte_eth_dev *eth_dev,
521 uint16_t rx_queue_id);
522int cnxk_nix_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
523 uint16_t rx_queue_id);
524int cnxk_nix_pool_ops_supported(struct rte_eth_dev *eth_dev, const char *pool);
525int cnxk_nix_tx_done_cleanup(void *txq, uint32_t free_cnt);
526int cnxk_nix_flow_ops_get(struct rte_eth_dev *eth_dev,
527 const struct rte_flow_ops **ops);
528int cnxk_nix_configure(struct rte_eth_dev *eth_dev);
529int cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
530 uint16_t nb_desc, uint16_t fp_tx_q_sz,
531 const struct rte_eth_txconf *tx_conf);
532int cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
533 uint32_t nb_desc, uint16_t fp_rx_q_sz,
534 const struct rte_eth_rxconf *rx_conf,
535 struct rte_mempool *mp);
536int cnxk_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid);
537int cnxk_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid);
538int cnxk_nix_dev_start(struct rte_eth_dev *eth_dev);
539int cnxk_nix_timesync_enable(struct rte_eth_dev *eth_dev);
540int cnxk_nix_timesync_disable(struct rte_eth_dev *eth_dev);
541int cnxk_nix_timesync_read_rx_timestamp(struct rte_eth_dev *eth_dev,
542 struct timespec *timestamp,
543 uint32_t flags);
544int cnxk_nix_timesync_read_tx_timestamp(struct rte_eth_dev *eth_dev,
545 struct timespec *timestamp);
546int cnxk_nix_timesync_read_time(struct rte_eth_dev *eth_dev,
547 struct timespec *ts);
548int cnxk_nix_timesync_write_time(struct rte_eth_dev *eth_dev,
549 const struct timespec *ts);
550int cnxk_nix_timesync_adjust_time(struct rte_eth_dev *eth_dev, int64_t delta);
551int cnxk_nix_tsc_convert(struct cnxk_eth_dev *dev);
552int cnxk_nix_read_clock(struct rte_eth_dev *eth_dev, uint64_t *clock);
553
554uint64_t cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev);
555int cnxk_nix_tm_ops_get(struct rte_eth_dev *eth_dev, void *ops);
556int cnxk_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
557 uint16_t queue_idx, uint16_t tx_rate);
558int cnxk_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green,
559 int mark_yellow, int mark_red,
560 struct rte_tm_error *error);
561int cnxk_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green,
562 int mark_yellow, int mark_red,
563 struct rte_tm_error *error);
564int cnxk_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green,
565 int mark_yellow, int mark_red,
566 struct rte_tm_error *error);
567
568
569int cnxk_nix_mtr_ops_get(struct rte_eth_dev *dev, void *ops);
570
571
572uint32_t cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
573 uint8_t rss_level);
574int cnxk_nix_reta_update(struct rte_eth_dev *eth_dev,
575 struct rte_eth_rss_reta_entry64 *reta_conf,
576 uint16_t reta_size);
577int cnxk_nix_reta_query(struct rte_eth_dev *eth_dev,
578 struct rte_eth_rss_reta_entry64 *reta_conf,
579 uint16_t reta_size);
580int cnxk_nix_rss_hash_update(struct rte_eth_dev *eth_dev,
581 struct rte_eth_rss_conf *rss_conf);
582int cnxk_nix_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
583 struct rte_eth_rss_conf *rss_conf);
584
585
586void cnxk_nix_toggle_flag_link_cfg(struct cnxk_eth_dev *dev, bool set);
587void cnxk_eth_dev_link_status_cb(struct roc_nix *nix,
588 struct roc_nix_link_info *link);
589void cnxk_eth_dev_link_status_get_cb(struct roc_nix *nix,
590 struct roc_nix_link_info *link);
591int cnxk_nix_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete);
592int cnxk_nix_queue_stats_mapping(struct rte_eth_dev *dev, uint16_t queue_id,
593 uint8_t stat_idx, uint8_t is_rx);
594int cnxk_nix_stats_reset(struct rte_eth_dev *dev);
595int cnxk_nix_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
596int cnxk_nix_xstats_get(struct rte_eth_dev *eth_dev,
597 struct rte_eth_xstat *xstats, unsigned int n);
598int cnxk_nix_xstats_get_names(struct rte_eth_dev *eth_dev,
599 struct rte_eth_xstat_name *xstats_names,
600 unsigned int limit);
601int cnxk_nix_xstats_get_names_by_id(struct rte_eth_dev *eth_dev,
602 const uint64_t *ids,
603 struct rte_eth_xstat_name *xstats_names,
604 unsigned int limit);
605int cnxk_nix_xstats_get_by_id(struct rte_eth_dev *eth_dev, const uint64_t *ids,
606 uint64_t *values, unsigned int n);
607int cnxk_nix_xstats_reset(struct rte_eth_dev *eth_dev);
608int cnxk_nix_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
609 size_t fw_size);
610void cnxk_nix_rxq_info_get(struct rte_eth_dev *eth_dev, uint16_t qid,
611 struct rte_eth_rxq_info *qinfo);
612void cnxk_nix_txq_info_get(struct rte_eth_dev *eth_dev, uint16_t qid,
613 struct rte_eth_txq_info *qinfo);
614
615
616int cnxk_nix_rx_descriptor_status(void *rxq, uint16_t offset);
617int cnxk_nix_tx_descriptor_status(void *txq, uint16_t offset);
618uint32_t cnxk_nix_rx_queue_count(void *rxq);
619
620
621const uint32_t *cnxk_nix_supported_ptypes_get(struct rte_eth_dev *eth_dev);
622void *cnxk_nix_fastpath_lookup_mem_get(void);
623
624
625int cnxk_ethdev_parse_devargs(struct rte_devargs *devargs,
626 struct cnxk_eth_dev *dev);
627
628
629int cnxk_nix_dev_get_reg(struct rte_eth_dev *eth_dev,
630 struct rte_dev_reg_info *regs);
631
632int cnxk_eth_outb_sa_idx_get(struct cnxk_eth_dev *dev, uint32_t *idx_p,
633 uint32_t spi);
634int cnxk_eth_outb_sa_idx_put(struct cnxk_eth_dev *dev, uint32_t idx);
635int cnxk_nix_lookup_mem_sa_base_set(struct cnxk_eth_dev *dev);
636int cnxk_nix_lookup_mem_sa_base_clear(struct cnxk_eth_dev *dev);
637__rte_internal
638int cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev);
639struct cnxk_eth_sec_sess *cnxk_eth_sec_sess_get_by_spi(struct cnxk_eth_dev *dev,
640 uint32_t spi, bool inb);
641struct cnxk_eth_sec_sess *
642cnxk_eth_sec_sess_get_by_sess(struct cnxk_eth_dev *dev,
643 struct rte_security_session *sess);
644
645
646int nix_recalc_mtu(struct rte_eth_dev *eth_dev);
647int nix_mtr_validate(struct rte_eth_dev *dev, uint32_t id);
648int nix_mtr_policy_act_get(struct rte_eth_dev *eth_dev, uint32_t id,
649 struct cnxk_mtr_policy_node **policy);
650int nix_mtr_rq_update(struct rte_eth_dev *eth_dev, uint32_t id,
651 uint32_t queue_num, const uint16_t *queue);
652int nix_mtr_chain_update(struct rte_eth_dev *eth_dev, uint32_t cur_id,
653 uint32_t prev_id, uint32_t next_id);
654int nix_mtr_chain_reset(struct rte_eth_dev *eth_dev, uint32_t cur_id);
655struct cnxk_meter_node *nix_get_mtr(struct rte_eth_dev *eth_dev,
656 uint32_t cur_id);
657int nix_mtr_level_update(struct rte_eth_dev *eth_dev, uint32_t id,
658 uint32_t level);
659int nix_mtr_capabilities_init(struct rte_eth_dev *eth_dev);
660int nix_mtr_configure(struct rte_eth_dev *eth_dev, uint32_t id);
661int nix_mtr_connect(struct rte_eth_dev *eth_dev, uint32_t id);
662int nix_mtr_destroy(struct rte_eth_dev *eth_dev, uint32_t id,
663 struct rte_mtr_error *error);
664int nix_mtr_color_action_validate(struct rte_eth_dev *eth_dev, uint32_t id,
665 uint32_t *prev_id, uint32_t *next_id,
666 struct cnxk_mtr_policy_node *policy,
667 int *tree_level);
668int nix_priority_flow_ctrl_rq_conf(struct rte_eth_dev *eth_dev, uint16_t qid,
669 uint8_t tx_pause, uint8_t tc);
670int nix_priority_flow_ctrl_sq_conf(struct rte_eth_dev *eth_dev, uint16_t qid,
671 uint8_t rx_pause, uint8_t tc);
672
673
/* Inlined variant of rte_pktmbuf_detach(): detach indirect mbuf 'm' from
 * its backing direct mbuf and restore 'm' as a plain direct mbuf backed
 * by its own embedded buffer, then free 'm' back to its pool.
 *
 * Returns 0 when the backing direct mbuf's refcount dropped to zero here
 * (it has been re-initialized and the caller may recycle it), 1 otherwise.
 */
static __rte_always_inline uint64_t
cnxk_pktmbuf_detach(struct rte_mbuf *m)
{
	struct rte_mempool *mp = m->pool;
	uint32_t mbuf_size, buf_len;
	struct rte_mbuf *md;
	uint16_t priv_size;
	uint16_t refcount;

	/* Drop the reference 'm' holds on its backing direct mbuf and
	 * remember the resulting refcount for the check below.
	 */
	md = rte_mbuf_from_indirect(m);
	refcount = rte_mbuf_refcnt_update(md, -1);

	priv_size = rte_pktmbuf_priv_size(mp);
	mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
	buf_len = rte_pktmbuf_data_room_size(mp);

	/* Point 'm' back at its own embedded data buffer and reset its
	 * metadata to a pristine single-segment state.
	 */
	m->priv_size = priv_size;
	m->buf_addr = (char *)m + mbuf_size;
	m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
	m->buf_len = (uint16_t)buf_len;
	rte_pktmbuf_reset_headroom(m);
	m->data_len = 0;
	m->ol_flags = 0;
	m->next = NULL;
	m->nb_segs = 1;

	/* 'm' is now a clean direct mbuf; release it to its pool. */
	rte_pktmbuf_free(m);

	if (refcount == 0) {
		/* Last reference dropped: re-arm the direct mbuf so the
		 * caller can recycle it directly.
		 */
		rte_mbuf_refcnt_set(md, 1);
		md->data_len = 0;
		md->ol_flags = 0;
		md->next = NULL;
		md->nb_segs = 1;
		return 0;
	} else {
		return 1;
	}
}
715
/* Pre-free check for a Tx segment: decide whether 'm' can be recycled
 * by the caller (return 0) or must not be touched because other
 * references remain (return 1). Direct, single-reference mbufs are
 * reset in place; indirect mbufs are detached via cnxk_pktmbuf_detach().
 */
static __rte_always_inline uint64_t
cnxk_nix_prefree_seg(struct rte_mbuf *m)
{
	if (likely(rte_mbuf_refcnt_read(m) == 1)) {
		/* Sole owner: indirect mbufs still need detaching */
		if (!RTE_MBUF_DIRECT(m))
			return cnxk_pktmbuf_detach(m);

		m->next = NULL;
		m->nb_segs = 1;
		return 0;
	} else if (rte_mbuf_refcnt_update(m, -1) == 0) {
		/* We just dropped the last reference */
		if (!RTE_MBUF_DIRECT(m))
			return cnxk_pktmbuf_detach(m);

		/* Re-arm refcount so the mbuf is ready for reuse */
		rte_mbuf_refcnt_set(m, 1);
		m->next = NULL;
		m->nb_segs = 1;
		return 0;
	}

	/* Other references remain; caller must not recycle this mbuf */
	return 1;
}
739
740static inline rte_mbuf_timestamp_t *
741cnxk_nix_timestamp_dynfield(struct rte_mbuf *mbuf,
742 struct cnxk_timesync_info *info)
743{
744 return RTE_MBUF_DYNFIELD(mbuf, info->tstamp_dynfield_offset,
745 rte_mbuf_timestamp_t *);
746}
747
748static __rte_always_inline uintptr_t
749cnxk_nix_sa_base_get(uint16_t port, const void *lookup_mem)
750{
751 uintptr_t sa_base_tbl;
752
753 sa_base_tbl = (uintptr_t)lookup_mem;
754 sa_base_tbl += PTYPE_ARRAY_SZ + ERR_ARRAY_SZ;
755 return *((const uintptr_t *)sa_base_tbl + port);
756}
757
758#endif
759