dpdk/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_event_crypto_adapter.h>
#include <rte_ip.h>
#include <rte_vect.h>

#include "cn9k_cryptodev.h"
#include "cn9k_cryptodev_ops.h"
#include "cn9k_ipsec.h"
#include "cn9k_ipsec_la_ops.h"
#include "cnxk_ae.h"
#include "cnxk_cryptodev.h"
#include "cnxk_cryptodev_ops.h"
#include "cnxk_se.h"

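/*
 * Fill a CPT instruction for a symmetric crypto op: use fill_fc_params()
 * when the session performs a cipher operation, else fill_digest_params()
 * for auth-only ops.
 */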
static __rte_always_inline int __rte_hot
cn9k_cpt_sym_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
                       struct cnxk_se_sess *sess,
                       struct cpt_inflight_req *infl_req,
                       struct cpt_inst_s *inst)
{
        uint64_t cpt_op;
        int ret;

        cpt_op = sess->cpt_op;

        if (cpt_op & ROC_SE_OP_CIPHER_MASK)
                ret = fill_fc_params(op, sess, &qp->meta_info, infl_req, inst);
        else
                ret = fill_digest_params(op, sess, &qp->meta_info, infl_req,
                                         inst);

        return ret;
}

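/*
 * Fill a CPT instruction for a lookaside IPsec (security session) op. Only
 * in-place, contiguous mbufs are supported. Dispatches to outbound or
 * inbound SA processing based on the SA direction.
 */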
static __rte_always_inline int __rte_hot
cn9k_cpt_sec_inst_fill(struct rte_crypto_op *op,
                       struct cpt_inflight_req *infl_req,
                       struct cpt_inst_s *inst)
{
        struct rte_crypto_sym_op *sym_op = op->sym;
        struct cn9k_sec_session *priv;
        struct cn9k_ipsec_sa *sa;

        if (unlikely(sym_op->m_dst && sym_op->m_dst != sym_op->m_src)) {
                plt_dp_err("Out of place is not supported");
                return -ENOTSUP;
        }

        if (unlikely(!rte_pktmbuf_is_contiguous(sym_op->m_src))) {
                plt_dp_err("Scatter Gather mode is not supported");
                return -ENOTSUP;
        }

        priv = get_sec_session_private_data(op->sym->sec_session);
        sa = &priv->sa;

        if (sa->dir == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
                return process_outb_sa(op, sa, inst);

        infl_req->op_flags |= CPT_OP_FLAGS_IPSEC_DIR_INBOUND;

        return process_inb_sa(op, sa, inst);
}

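/*
 * Create a temporary session for a sessionless symmetric op from the queue
 * pair's session mempools and attach it to the op. Returns the driver
 * private session data, or NULL on failure.
 */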
static inline struct cnxk_se_sess *
cn9k_cpt_sym_temp_sess_create(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op)
{
        const int driver_id = cn9k_cryptodev_driver_id;
        struct rte_crypto_sym_op *sym_op = op->sym;
        struct rte_cryptodev_sym_session *sess;
        struct cnxk_se_sess *priv;
        int ret;

        /* Create temporary session */
        sess = rte_cryptodev_sym_session_create(qp->sess_mp);
        if (sess == NULL)
                return NULL;

        ret = sym_session_configure(qp->lf.roc_cpt, driver_id, sym_op->xform,
                                    sess, qp->sess_mp_priv);
        if (ret)
                goto sess_put;

        priv = get_sym_session_private_data(sess, driver_id);

        sym_op->session = sess;

        return priv;

sess_put:
        rte_mempool_put(qp->sess_mp, sess);
        return NULL;
}

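/*
 * Prepare a CPT instruction for the given crypto op. Handles symmetric ops
 * (with session, security session, or sessionless via a temporary session)
 * and asymmetric ops with a session; anything else is rejected.
 */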
static inline int
cn9k_cpt_inst_prep(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
                   struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
{
        int ret;

        if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
                struct rte_crypto_sym_op *sym_op;
                struct cnxk_se_sess *sess;

                if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
                        sym_op = op->sym;
                        sess = get_sym_session_private_data(
                                sym_op->session, cn9k_cryptodev_driver_id);
                        ret = cn9k_cpt_sym_inst_fill(qp, op, sess, infl_req,
                                                     inst);
                        inst->w7.u64 = sess->cpt_inst_w7;
                } else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
                        ret = cn9k_cpt_sec_inst_fill(op, infl_req, inst);
                else {
                        sess = cn9k_cpt_sym_temp_sess_create(qp, op);
                        if (unlikely(sess == NULL)) {
                                plt_dp_err("Could not create temp session");
                                return -1;
                        }

                        ret = cn9k_cpt_sym_inst_fill(qp, op, sess, infl_req,
                                                     inst);
                        if (unlikely(ret)) {
                                sym_session_clear(cn9k_cryptodev_driver_id,
                                                  op->sym->session);
                                rte_mempool_put(qp->sess_mp, op->sym->session);
                        }
                        inst->w7.u64 = sess->cpt_inst_w7;
                }
        } else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
                struct rte_crypto_asym_op *asym_op;
                struct cnxk_ae_sess *sess;

                if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
                        asym_op = op->asym;
                        sess = get_asym_session_private_data(
                                asym_op->session, cn9k_cryptodev_driver_id);
                        ret = cnxk_ae_enqueue(qp, op, infl_req, inst, sess);
                        inst->w7.u64 = sess->cpt_inst_w7;
                } else {
                        ret = -EINVAL;
                }
        } else {
                ret = -EINVAL;
                plt_dp_err("Unsupported op type");
        }

        return ret;
}

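/*
 * Submit a single CPT instruction via LMTST, retrying until the LMTST
 * completes successfully.
 */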
static inline void
cn9k_cpt_inst_submit(struct cpt_inst_s *inst, uint64_t lmtline,
                     uint64_t io_addr)
{
        uint64_t lmt_status;

        do {
                /* Copy CPT command to LMTLINE */
                roc_lmt_mov((void *)lmtline, inst, 2);

                /*
                 * Make sure compiler does not reorder memcpy and ldeor.
                 * LMTST transactions are always flushed from the write
                 * buffer immediately, so a DMB is not required to push out
                 * LMTSTs.
                 */
                rte_io_wmb();
                lmt_status = roc_lmt_submit_ldeor(io_addr);
        } while (lmt_status == 0);
}

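/*
 * Submit two back-to-back CPT instructions through a single LMTST,
 * retrying until the LMTST completes successfully.
 */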
static __plt_always_inline void
cn9k_cpt_inst_submit_dual(struct cpt_inst_s *inst, uint64_t lmtline,
                          uint64_t io_addr)
{
        uint64_t lmt_status;

        do {
                /* Copy 2 CPT inst_s to LMTLINE */
#if defined(RTE_ARCH_ARM64)
                uint64_t *s = (uint64_t *)inst;
                uint64_t *d = (uint64_t *)lmtline;

                vst1q_u64(&d[0], vld1q_u64(&s[0]));
                vst1q_u64(&d[2], vld1q_u64(&s[2]));
                vst1q_u64(&d[4], vld1q_u64(&s[4]));
                vst1q_u64(&d[6], vld1q_u64(&s[6]));
                vst1q_u64(&d[8], vld1q_u64(&s[8]));
                vst1q_u64(&d[10], vld1q_u64(&s[10]));
                vst1q_u64(&d[12], vld1q_u64(&s[12]));
                vst1q_u64(&d[14], vld1q_u64(&s[14]));
#else
                roc_lmt_mov_seg((void *)lmtline, inst, 8);
#endif

                /*
                 * Make sure compiler does not reorder memcpy and ldeor.
                 * LMTST transactions are always flushed from the write
                 * buffer immediately, so a DMB is not required to push out
                 * LMTSTs.
                 */
                rte_io_wmb();
                lmt_status = roc_lmt_submit_ldeor(io_addr);
        } while (lmt_status == 0);
}

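/*
 * Enqueue a burst of crypto ops. Instructions are prepared and submitted in
 * pairs through dual LMTSTs; a leading odd op is submitted individually.
 * The pending queue head is only published after all submissions.
 */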
static uint16_t
cn9k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
        struct cpt_inflight_req *infl_req_1, *infl_req_2;
        struct cpt_inst_s inst[2] __rte_cache_aligned;
        struct rte_crypto_op *op_1, *op_2;
        uint16_t nb_allowed, count = 0;
        struct cnxk_cpt_qp *qp = qptr;
        struct pending_queue *pend_q;
        uint64_t head;
        int ret;

        pend_q = &qp->pend_q;

        const uint64_t lmt_base = qp->lf.lmt_base;
        const uint64_t io_addr = qp->lf.io_addr;
        const uint64_t pq_mask = pend_q->pq_mask;

        /* Clear w0, w2, w3 of both inst */

        inst[0].w0.u64 = 0;
        inst[0].w2.u64 = 0;
        inst[0].w3.u64 = 0;
        inst[1].w0.u64 = 0;
        inst[1].w2.u64 = 0;
        inst[1].w3.u64 = 0;

        head = pend_q->head;
        nb_allowed = pending_queue_free_cnt(head, pend_q->tail, pq_mask);
        nb_ops = RTE_MIN(nb_ops, nb_allowed);

        if (unlikely(nb_ops & 1)) {
                op_1 = ops[0];
                infl_req_1 = &pend_q->req_queue[head];
                infl_req_1->op_flags = 0;

                ret = cn9k_cpt_inst_prep(qp, op_1, infl_req_1, &inst[0]);
                if (unlikely(ret)) {
                        plt_dp_err("Could not process op: %p", op_1);
                        return 0;
                }

                infl_req_1->cop = op_1;
                infl_req_1->res.cn9k.compcode = CPT_COMP_NOT_DONE;
                inst[0].res_addr = (uint64_t)&infl_req_1->res;

                cn9k_cpt_inst_submit(&inst[0], lmt_base, io_addr);
                pending_queue_advance(&head, pq_mask);
                count++;
        }

        while (count < nb_ops) {
                op_1 = ops[count];
                op_2 = ops[count + 1];

                infl_req_1 = &pend_q->req_queue[head];
                pending_queue_advance(&head, pq_mask);
                infl_req_2 = &pend_q->req_queue[head];
                pending_queue_advance(&head, pq_mask);

                infl_req_1->cop = op_1;
                infl_req_2->cop = op_2;
                infl_req_1->op_flags = 0;
                infl_req_2->op_flags = 0;

                infl_req_1->res.cn9k.compcode = CPT_COMP_NOT_DONE;
                inst[0].res_addr = (uint64_t)&infl_req_1->res;

                infl_req_2->res.cn9k.compcode = CPT_COMP_NOT_DONE;
                inst[1].res_addr = (uint64_t)&infl_req_2->res;

                ret = cn9k_cpt_inst_prep(qp, op_1, infl_req_1, &inst[0]);
                if (unlikely(ret)) {
                        plt_dp_err("Could not process op: %p", op_1);
                        pending_queue_retreat(&head, pq_mask, 2);
                        break;
                }

                ret = cn9k_cpt_inst_prep(qp, op_2, infl_req_2, &inst[1]);
                if (unlikely(ret)) {
                        plt_dp_err("Could not process op: %p", op_2);
                        pending_queue_retreat(&head, pq_mask, 1);
                        cn9k_cpt_inst_submit(&inst[0], lmt_base, io_addr);
                        count++;
                        break;
                }

                cn9k_cpt_inst_submit_dual(&inst[0], lmt_base, io_addr);

                count += 2;
        }

        rte_atomic_thread_fence(__ATOMIC_RELEASE);

        pend_q->head = head;
        pend_q->time_out = rte_get_timer_cycles() +
                           DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();

        return count;
}

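/*
 * Enqueue a crypto op through the event crypto adapter. The response event
 * attributes come from the op's event crypto metadata, and the inflight
 * request is taken from the adapter request mempool.
 */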
uint16_t
cn9k_cpt_crypto_adapter_enqueue(uintptr_t tag_op, struct rte_crypto_op *op)
{
        union rte_event_crypto_metadata *ec_mdata;
        struct cpt_inflight_req *infl_req;
        struct rte_event *rsp_info;
        struct cnxk_cpt_qp *qp;
        struct cpt_inst_s inst;
        uint8_t cdev_id;
        uint16_t qp_id;
        int ret;

        ec_mdata = cnxk_event_crypto_mdata_get(op);
        if (!ec_mdata) {
                rte_errno = EINVAL;
                return 0;
        }

        cdev_id = ec_mdata->request_info.cdev_id;
        qp_id = ec_mdata->request_info.queue_pair_id;
        qp = rte_cryptodevs[cdev_id].data->queue_pairs[qp_id];
        rsp_info = &ec_mdata->response_info;

        if (unlikely(!qp->ca.enabled)) {
                rte_errno = EINVAL;
                return 0;
        }

        if (unlikely(rte_mempool_get(qp->ca.req_mp, (void **)&infl_req))) {
                rte_errno = ENOMEM;
                return 0;
        }
        infl_req->op_flags = 0;

        ret = cn9k_cpt_inst_prep(qp, op, infl_req, &inst);
        if (unlikely(ret)) {
                plt_dp_err("Could not process op: %p", op);
                rte_mempool_put(qp->ca.req_mp, infl_req);
                return 0;
        }

        infl_req->cop = op;
        infl_req->res.cn9k.compcode = CPT_COMP_NOT_DONE;
        infl_req->qp = qp;
        inst.w0.u64 = 0;
        inst.res_addr = (uint64_t)&infl_req->res;
        inst.w2.u64 = CNXK_CPT_INST_W2(
                (RTE_EVENT_TYPE_CRYPTODEV << 28) | rsp_info->flow_id,
                rsp_info->sched_type, rsp_info->queue_id, 0);
        inst.w3.u64 = CNXK_CPT_INST_W3(1, infl_req);

        if (roc_cpt_is_iq_full(&qp->lf)) {
                rte_mempool_put(qp->ca.req_mp, infl_req);
                rte_errno = EAGAIN;
                return 0;
        }

        if (!rsp_info->sched_type)
                roc_sso_hws_head_wait(tag_op);

        cn9k_cpt_inst_submit(&inst, qp->lmtline.lmt_base, qp->lmtline.io_addr);

        return 1;
}

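/*
 * Post-process an inbound IPsec op: derive the packet length from the inner
 * IPv4/IPv6 header and advance the mbuf past the ROC_IE_ON_INB_RPTR_HDR
 * bytes preceding it.
 */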
static inline void
cn9k_cpt_sec_post_process(struct rte_crypto_op *cop,
                          struct cpt_inflight_req *infl_req)
{
        struct rte_crypto_sym_op *sym_op = cop->sym;
        struct rte_mbuf *m = sym_op->m_src;
        struct rte_ipv6_hdr *ip6;
        struct rte_ipv4_hdr *ip;
        uint16_t m_len = 0;
        char *data;

        if (infl_req->op_flags & CPT_OP_FLAGS_IPSEC_DIR_INBOUND) {
                data = rte_pktmbuf_mtod(m, char *);

                ip = (struct rte_ipv4_hdr *)(data + ROC_IE_ON_INB_RPTR_HDR);

                if (((ip->version_ihl & 0xf0) >> RTE_IPV4_IHL_MULTIPLIER) ==
                    IPVERSION) {
                        m_len = rte_be_to_cpu_16(ip->total_length);
                } else {
                        PLT_ASSERT(((ip->version_ihl & 0xf0) >>
                                    RTE_IPV4_IHL_MULTIPLIER) == 6);
                        ip6 = (struct rte_ipv6_hdr *)ip;
                        m_len = rte_be_to_cpu_16(ip6->payload_len) +
                                sizeof(struct rte_ipv6_hdr);
                }

                m->data_len = m_len;
                m->pkt_len = m_len;
                m->data_off += ROC_IE_ON_INB_RPTR_HDR;
        }
}

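/*
 * Translate the CPT completion code into an rte_crypto op status and run any
 * per-op post-processing (IPsec post-processing, auth verification,
 * asymmetric result conversion). Temporary sessions created for sessionless
 * ops are freed here.
 */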
static inline void
cn9k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp, struct rte_crypto_op *cop,
                              struct cpt_inflight_req *infl_req)
{
        struct cpt_cn9k_res_s *res = (struct cpt_cn9k_res_s *)&infl_req->res;
        unsigned int sz;

        if (likely(res->compcode == CPT_COMP_GOOD)) {
                if (unlikely(res->uc_compcode)) {
                        if (res->uc_compcode == ROC_SE_ERR_GC_ICV_MISCOMPARE)
                                cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
                        else
                                cop->status = RTE_CRYPTO_OP_STATUS_ERROR;

                        plt_dp_info("Request failed with microcode error");
                        plt_dp_info("MC completion code 0x%x",
                                    res->uc_compcode);
                        goto temp_sess_free;
                }

                cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
                if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
                        if (cop->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
                                cn9k_cpt_sec_post_process(cop, infl_req);
                                return;
                        }

                        /* Verify authentication data if required */
                        if (unlikely(infl_req->op_flags &
                                     CPT_OP_FLAGS_AUTH_VERIFY)) {
                                uintptr_t *rsp = infl_req->mdata;
                                compl_auth_verify(cop, (uint8_t *)rsp[0],
                                                  rsp[1]);
                        }
                } else if (cop->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
                        struct rte_crypto_asym_op *op = cop->asym;
                        uintptr_t *mdata = infl_req->mdata;
                        struct cnxk_ae_sess *sess;

                        sess = get_asym_session_private_data(
                                op->session, cn9k_cryptodev_driver_id);

                        cnxk_ae_post_process(cop, sess, (uint8_t *)mdata[0]);
                }
        } else {
                cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
                plt_dp_info("HW completion code 0x%x", res->compcode);

                switch (res->compcode) {
                case CPT_COMP_INSTERR:
                        plt_dp_err("Request failed with instruction error");
                        break;
                case CPT_COMP_FAULT:
                        plt_dp_err("Request failed with DMA fault");
                        break;
                case CPT_COMP_HWERR:
                        plt_dp_err("Request failed with hardware error");
                        break;
                default:
                        plt_dp_err(
                                "Request failed with unknown completion code");
                }
        }

temp_sess_free:
        if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
                if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
                        sym_session_clear(cn9k_cryptodev_driver_id,
                                          cop->sym->session);
                        sz = rte_cryptodev_sym_get_existing_header_session_size(
                                cop->sym->session);
                        memset(cop->sym->session, 0, sz);
                        rte_mempool_put(qp->sess_mp, cop->sym->session);
                        cop->sym->session = NULL;
                }
        }
}

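/*
 * Complete a crypto op delivered through the event crypto adapter: run
 * post-processing, release the metabuf and inflight request, and return the
 * op pointer for the application.
 */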
uintptr_t
cn9k_cpt_crypto_adapter_dequeue(uintptr_t get_work1)
{
        struct cpt_inflight_req *infl_req;
        struct rte_crypto_op *cop;
        struct cnxk_cpt_qp *qp;

        infl_req = (struct cpt_inflight_req *)(get_work1);
        cop = infl_req->cop;
        qp = infl_req->qp;

        cn9k_cpt_dequeue_post_process(qp, infl_req->cop, infl_req);

        if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
                rte_mempool_put(qp->meta_info.pool, infl_req->mdata);

        rte_mempool_put(qp->ca.req_mp, infl_req);
        return (uintptr_t)cop;
}

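/*
 * Dequeue completed crypto ops from the pending queue. Stops at the first
 * request that is still in flight; a timeout is logged if a request stays
 * incomplete beyond DEFAULT_COMMAND_TIMEOUT.
 */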
static uint16_t
cn9k_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
        struct cpt_inflight_req *infl_req;
        struct cnxk_cpt_qp *qp = qptr;
        struct pending_queue *pend_q;
        struct cpt_cn9k_res_s *res;
        uint64_t infl_cnt, pq_tail;
        struct rte_crypto_op *cop;
        int i;

        pend_q = &qp->pend_q;

        const uint64_t pq_mask = pend_q->pq_mask;

        pq_tail = pend_q->tail;
        infl_cnt = pending_queue_infl_cnt(pend_q->head, pq_tail, pq_mask);
        nb_ops = RTE_MIN(nb_ops, infl_cnt);

        /* Ensure infl_cnt isn't read before data lands */
        rte_atomic_thread_fence(__ATOMIC_ACQUIRE);

        for (i = 0; i < nb_ops; i++) {
                infl_req = &pend_q->req_queue[pq_tail];

                res = (struct cpt_cn9k_res_s *)&infl_req->res;

                if (unlikely(res->compcode == CPT_COMP_NOT_DONE)) {
                        if (unlikely(rte_get_timer_cycles() >
                                     pend_q->time_out)) {
                                plt_err("Request timed out");
                                pend_q->time_out = rte_get_timer_cycles() +
                                                   DEFAULT_COMMAND_TIMEOUT *
                                                           rte_get_timer_hz();
                        }
                        break;
                }

                pending_queue_advance(&pq_tail, pq_mask);

                cop = infl_req->cop;

                ops[i] = cop;

                cn9k_cpt_dequeue_post_process(qp, cop, infl_req);

                if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
                        rte_mempool_put(qp->meta_info.pool, infl_req->mdata);
        }

        pend_q->tail = pq_tail;

        return i;
}

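/* Install the CN9K enqueue/dequeue burst handlers on the device. */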
void
cn9k_cpt_set_enqdeq_fns(struct rte_cryptodev *dev)
{
        dev->enqueue_burst = cn9k_cpt_enqueue_burst;
        dev->dequeue_burst = cn9k_cpt_dequeue_burst;

        rte_mb();
}

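/* Report device info, overriding the driver ID with the CN9K driver ID. */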
static void
cn9k_cpt_dev_info_get(struct rte_cryptodev *dev,
                      struct rte_cryptodev_info *info)
{
        if (info != NULL) {
                cnxk_cpt_dev_info_get(dev, info);
                info->driver_id = cn9k_cryptodev_driver_id;
        }
}

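/* CN9K cryptodev ops: common cnxk handlers with CN9K-specific info get. */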
struct rte_cryptodev_ops cn9k_cpt_ops = {
        /* Device control ops */
        .dev_configure = cnxk_cpt_dev_config,
        .dev_start = cnxk_cpt_dev_start,
        .dev_stop = cnxk_cpt_dev_stop,
        .dev_close = cnxk_cpt_dev_close,
        .dev_infos_get = cn9k_cpt_dev_info_get,

        .stats_get = NULL,
        .stats_reset = NULL,
        .queue_pair_setup = cnxk_cpt_queue_pair_setup,
        .queue_pair_release = cnxk_cpt_queue_pair_release,

        /* Symmetric crypto ops */
        .sym_session_get_size = cnxk_cpt_sym_session_get_size,
        .sym_session_configure = cnxk_cpt_sym_session_configure,
        .sym_session_clear = cnxk_cpt_sym_session_clear,

        /* Asymmetric crypto ops */
        .asym_session_get_size = cnxk_ae_session_size_get,
        .asym_session_configure = cnxk_ae_session_cfg,
        .asym_session_clear = cnxk_ae_session_clear,

};