dpdk/drivers/crypto/dpaa_sec/dpaa_sec.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017-2019 NXP
 *
 */

#include <fcntl.h>
#include <unistd.h>
#include <sched.h>
#include <net/if.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#ifdef RTE_LIB_SECURITY
#include <rte_security_driver.h>
#endif
#include <rte_cycles.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <dpaa_of.h>

/* RTA header files */
#include <desc/common.h>
#include <desc/algo.h>
#include <desc/ipsec.h>
#include <desc/pdcp.h>
#include <desc/sdap.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_event.h>
#include <dpaa_sec_log.h>
#include <dpaax_iova_table.h>

static uint8_t cryptodev_driver_id;

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);

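/* propagate the SEC frame status into the crypto op: a zero fd_status
 * means success; anything else is logged and flagged as an error
 */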
static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
        if (!ctx->fd_status) {
                ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
        } else {
                DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
                ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
        }
}

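/* allocate a per-op context (with its embedded SG table) from the ctx
 * pool of the current lcore's queue pair; sg_count tells how many SG
 * entries the caller is going to use, so only those need clearing
 */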
static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
{
        struct dpaa_sec_op_ctx *ctx;
        int i, retval;

        retval = rte_mempool_get(
                        ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
                        (void **)(&ctx));
        if (!ctx || retval) {
                DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
                return NULL;
        }
        /*
         * Clear the SG memory. There are 16 SG entries of 16 bytes each.
         * One call to dcbz_64() clears 64 bytes, i.e. four SG entries at
         * a time. Since dpaa_sec_alloc_ctx() is called for each packet,
         * dcbz_64() is cheaper here than a memset.
         */
        for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
                dcbz_64(&ctx->job.sg[i]);

        ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
        ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);

        return ctx;
}

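/* enqueue rejection (ERN) handler for the SEC frame queues; rejected
 * frames are only logged with their rejection code and sequence number
 */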
static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
                   struct qman_fq *fq,
                   const struct qm_mr_entry *msg)
{
        DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
                        fq->fqid, msg->ern.rc, msg->ern.seqnum);
}

/* initialize the queue with the CAAM channel as destination so that
 * all packets enqueued on this queue are dispatched to CAAM
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
                 uint32_t fqid_out)
{
        struct qm_mcc_initfq fq_opts;
        uint32_t flags;
        int ret = -1;

        /* Clear FQ options */
        memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

        flags = QMAN_INITFQ_FLAG_SCHED;
        fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
                          QM_INITFQ_WE_CONTEXTB;

        qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
        fq_opts.fqd.context_b = fqid_out;
        fq_opts.fqd.dest.channel = dpaa_get_qm_channel_caam();
        fq_opts.fqd.dest.wq = 0;

        fq_in->cb.ern  = ern_sec_fq_handler;

        DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

        ret = qman_init_fq(fq_in, flags, &fq_opts);
        if (unlikely(ret != 0))
                DPAA_SEC_ERR("qman_init_fq failed %d", ret);

        return ret;
}

/* jobs are put into the in_fq and CAAM puts the crypto result into the out_fq */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
                  struct qman_fq *fq __always_unused,
                  const struct qm_dqrr_entry *dqrr)
{
        const struct qm_fd *fd;
        struct dpaa_sec_job *job;
        struct dpaa_sec_op_ctx *ctx;

        if (DPAA_PER_LCORE_DPAA_SEC_OP_NB >= DPAA_SEC_BURST)
                return qman_cb_dqrr_defer;

        if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
                return qman_cb_dqrr_consume;

        fd = &dqrr->fd;
        /* the SG table is embedded in the op ctx:
         * sg[0] is for output,
         * sg[1] is for input
         */
        job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

        ctx = container_of(job, struct dpaa_sec_op_ctx, job);
        ctx->fd_status = fd->status;
        if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
                struct qm_sg_entry *sg_out;
                uint32_t len;
                struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
                                ctx->op->sym->m_src : ctx->op->sym->m_dst;

                sg_out = &job->sg[0];
                hw_sg_to_cpu(sg_out);
                len = sg_out->length;
                mbuf->pkt_len = len;
                while (mbuf->next != NULL) {
                        len -= mbuf->data_len;
                        mbuf = mbuf->next;
                }
                mbuf->data_len = len;
        }
        DPAA_PER_LCORE_RTE_CRYPTO_OP[DPAA_PER_LCORE_DPAA_SEC_OP_NB++] = ctx->op;
        dpaa_sec_op_ending(ctx);

        return qman_cb_dqrr_consume;
}

/* caam result is put into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
        int ret;
        struct qm_mcc_initfq opts;
        uint32_t flags;

        flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
                QMAN_FQ_FLAG_DYNAMIC_FQID;

        ret = qman_create_fq(0, flags, fq);
        if (unlikely(ret)) {
                DPAA_SEC_ERR("qman_create_fq failed");
                return ret;
        }

        memset(&opts, 0, sizeof(opts));
        opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
                       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

        /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

        fq->cb.dqrr = dqrr_out_fq_cb_rx;
        fq->cb.ern  = ern_sec_fq_handler;

        ret = qman_init_fq(fq, 0, &opts);
        if (unlikely(ret)) {
                DPAA_SEC_ERR("unable to init caam source fq!");
                return ret;
        }

        return ret;
}

static inline int is_aead(dpaa_sec_session *ses)
{
        return ((ses->cipher_alg == 0) &&
                (ses->auth_alg == 0) &&
                (ses->aead_alg != 0));
}

static inline int is_encode(dpaa_sec_session *ses)
{
        return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
        return ses->dir == DIR_DEC;
}

#ifdef RTE_LIB_SECURITY
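/* prepare the PDCP shared descriptor of the session: control plane vs
 * user plane is selected from ses->pdcp.domain, and the SDAP-aware
 * user-plane descriptors are used when sdap_enabled is set; the cipher
 * key is passed by pointer when rta_inline_pdcp_query() reports that it
 * cannot be inlined in the descriptor
 */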
static int
dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
{
        struct alginfo authdata = {0}, cipherdata = {0};
        struct sec_cdb *cdb = &ses->cdb;
        struct alginfo *p_authdata = NULL;
        int32_t shared_desc_len = 0;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
        int swap = false;
#else
        int swap = true;
#endif

        cipherdata.key = (size_t)ses->cipher_key.data;
        cipherdata.keylen = ses->cipher_key.length;
        cipherdata.key_enc_flags = 0;
        cipherdata.key_type = RTA_DATA_IMM;
        cipherdata.algtype = ses->cipher_key.alg;
        cipherdata.algmode = ses->cipher_key.algmode;

        if (ses->auth_alg) {
                authdata.key = (size_t)ses->auth_key.data;
                authdata.keylen = ses->auth_key.length;
                authdata.key_enc_flags = 0;
                authdata.key_type = RTA_DATA_IMM;
                authdata.algtype = ses->auth_key.alg;
                authdata.algmode = ses->auth_key.algmode;

                p_authdata = &authdata;
        }

        if (rta_inline_pdcp_query(authdata.algtype,
                                cipherdata.algtype,
                                ses->pdcp.sn_size,
                                ses->pdcp.hfn_ovd)) {
                cipherdata.key =
                        (size_t)rte_dpaa_mem_vtop((void *)
                                        (size_t)cipherdata.key);
                cipherdata.key_type = RTA_DATA_PTR;
        }

        if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
                if (ses->dir == DIR_ENC)
                        shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
                                        cdb->sh_desc, 1, swap,
                                        ses->pdcp.hfn,
                                        ses->pdcp.sn_size,
                                        ses->pdcp.bearer,
                                        ses->pdcp.pkt_dir,
                                        ses->pdcp.hfn_threshold,
                                        &cipherdata, &authdata,
                                        0);
                else if (ses->dir == DIR_DEC)
                        shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
                                        cdb->sh_desc, 1, swap,
                                        ses->pdcp.hfn,
                                        ses->pdcp.sn_size,
                                        ses->pdcp.bearer,
                                        ses->pdcp.pkt_dir,
                                        ses->pdcp.hfn_threshold,
                                        &cipherdata, &authdata,
                                        0);
        } else {
                if (ses->dir == DIR_ENC) {
                        if (ses->pdcp.sdap_enabled)
                                shared_desc_len =
                                        cnstr_shdsc_pdcp_sdap_u_plane_encap(
                                                cdb->sh_desc, 1, swap,
                                                ses->pdcp.sn_size,
                                                ses->pdcp.hfn,
                                                ses->pdcp.bearer,
                                                ses->pdcp.pkt_dir,
                                                ses->pdcp.hfn_threshold,
                                                &cipherdata, p_authdata, 0);
                        else
                                shared_desc_len =
                                        cnstr_shdsc_pdcp_u_plane_encap(
                                                cdb->sh_desc, 1, swap,
                                                ses->pdcp.sn_size,
                                                ses->pdcp.hfn,
                                                ses->pdcp.bearer,
                                                ses->pdcp.pkt_dir,
                                                ses->pdcp.hfn_threshold,
                                                &cipherdata, p_authdata, 0);
                } else if (ses->dir == DIR_DEC) {
                        if (ses->pdcp.sdap_enabled)
                                shared_desc_len =
                                        cnstr_shdsc_pdcp_sdap_u_plane_decap(
                                                cdb->sh_desc, 1, swap,
                                                ses->pdcp.sn_size,
                                                ses->pdcp.hfn,
                                                ses->pdcp.bearer,
                                                ses->pdcp.pkt_dir,
                                                ses->pdcp.hfn_threshold,
                                                &cipherdata, p_authdata, 0);
                        else
                                shared_desc_len =
                                        cnstr_shdsc_pdcp_u_plane_decap(
                                                cdb->sh_desc, 1, swap,
                                                ses->pdcp.sn_size,
                                                ses->pdcp.hfn,
                                                ses->pdcp.bearer,
                                                ses->pdcp.pkt_dir,
                                                ses->pdcp.hfn_threshold,
                                                &cipherdata, p_authdata, 0);
                }
        }
        return shared_desc_len;
}

/* prepare ipsec proto command block of the session */
static int
dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
{
        struct alginfo cipherdata = {0}, authdata = {0};
        struct sec_cdb *cdb = &ses->cdb;
        int32_t shared_desc_len = 0;
        int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
        int swap = false;
#else
        int swap = true;
#endif

        cipherdata.key = (size_t)ses->cipher_key.data;
        cipherdata.keylen = ses->cipher_key.length;
        cipherdata.key_enc_flags = 0;
        cipherdata.key_type = RTA_DATA_IMM;
        cipherdata.algtype = ses->cipher_key.alg;
        cipherdata.algmode = ses->cipher_key.algmode;

        if (ses->auth_key.length) {
                authdata.key = (size_t)ses->auth_key.data;
                authdata.keylen = ses->auth_key.length;
                authdata.key_enc_flags = 0;
                authdata.key_type = RTA_DATA_IMM;
                authdata.algtype = ses->auth_key.alg;
                authdata.algmode = ses->auth_key.algmode;
        }

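        /* ask RTA whether both keys fit inline in the shared descriptor:
         * the key lengths go in sh_desc[0]/[1] and the answer comes back
         * as a bitmask in sh_desc[2] (bit 0 = cipher key, bit 1 = auth
         * key); a cleared bit means that key must be referenced by pointer
         */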
        cdb->sh_desc[0] = cipherdata.keylen;
        cdb->sh_desc[1] = authdata.keylen;
        err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
                               DESC_JOB_IO_LEN,
                               (unsigned int *)cdb->sh_desc,
                               &cdb->sh_desc[2], 2);

        if (err < 0) {
                DPAA_SEC_ERR("Crypto: Incorrect key lengths");
                return err;
        }
        if (cdb->sh_desc[2] & 1)
                cipherdata.key_type = RTA_DATA_IMM;
        else {
                cipherdata.key = (size_t)rte_dpaa_mem_vtop(
                                        (void *)(size_t)cipherdata.key);
                cipherdata.key_type = RTA_DATA_PTR;
        }
        if (cdb->sh_desc[2] & (1<<1))
                authdata.key_type = RTA_DATA_IMM;
        else {
                authdata.key = (size_t)rte_dpaa_mem_vtop(
                                        (void *)(size_t)authdata.key);
                authdata.key_type = RTA_DATA_PTR;
        }

        cdb->sh_desc[0] = 0;
        cdb->sh_desc[1] = 0;
        cdb->sh_desc[2] = 0;
        if (ses->dir == DIR_ENC) {
                shared_desc_len = cnstr_shdsc_ipsec_new_encap(
                                cdb->sh_desc,
                                true, swap, SHR_SERIAL,
                                &ses->encap_pdb,
                                (uint8_t *)&ses->ip4_hdr,
                                &cipherdata, &authdata);
        } else if (ses->dir == DIR_DEC) {
                shared_desc_len = cnstr_shdsc_ipsec_new_decap(
                                cdb->sh_desc,
                                true, swap, SHR_SERIAL,
                                &ses->decap_pdb,
                                &cipherdata, &authdata);
        }
        return shared_desc_len;
}
#endif
/* prepare command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
        struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
        int32_t shared_desc_len = 0;
        struct sec_cdb *cdb = &ses->cdb;
        int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
        int swap = false;
#else
        int swap = true;
#endif

        memset(cdb, 0, sizeof(struct sec_cdb));

        switch (ses->ctxt) {
#ifdef RTE_LIB_SECURITY
        case DPAA_SEC_IPSEC:
                shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
                break;
        case DPAA_SEC_PDCP:
                shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
                break;
#endif
        case DPAA_SEC_CIPHER:
                alginfo_c.key = (size_t)ses->cipher_key.data;
                alginfo_c.keylen = ses->cipher_key.length;
                alginfo_c.key_enc_flags = 0;
                alginfo_c.key_type = RTA_DATA_IMM;
                alginfo_c.algtype = ses->cipher_key.alg;
                alginfo_c.algmode = ses->cipher_key.algmode;

                switch (ses->cipher_alg) {
                case RTE_CRYPTO_CIPHER_AES_CBC:
                case RTE_CRYPTO_CIPHER_3DES_CBC:
                case RTE_CRYPTO_CIPHER_AES_CTR:
                case RTE_CRYPTO_CIPHER_3DES_CTR:
                        shared_desc_len = cnstr_shdsc_blkcipher(
                                        cdb->sh_desc, true,
                                        swap, SHR_NEVER, &alginfo_c,
                                        ses->iv.length,
                                        ses->dir);
                        break;
                case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
                        shared_desc_len = cnstr_shdsc_snow_f8(
                                        cdb->sh_desc, true, swap,
                                        &alginfo_c,
                                        ses->dir);
                        break;
                case RTE_CRYPTO_CIPHER_ZUC_EEA3:
                        shared_desc_len = cnstr_shdsc_zuce(
                                        cdb->sh_desc, true, swap,
                                        &alginfo_c,
                                        ses->dir);
                        break;
                default:
                        DPAA_SEC_ERR("unsupported cipher alg %d",
                                     ses->cipher_alg);
                        return -ENOTSUP;
                }
                break;
        case DPAA_SEC_AUTH:
                alginfo_a.key = (size_t)ses->auth_key.data;
                alginfo_a.keylen = ses->auth_key.length;
                alginfo_a.key_enc_flags = 0;
                alginfo_a.key_type = RTA_DATA_IMM;
                alginfo_a.algtype = ses->auth_key.alg;
                alginfo_a.algmode = ses->auth_key.algmode;
                switch (ses->auth_alg) {
                case RTE_CRYPTO_AUTH_MD5_HMAC:
                case RTE_CRYPTO_AUTH_SHA1_HMAC:
                case RTE_CRYPTO_AUTH_SHA224_HMAC:
                case RTE_CRYPTO_AUTH_SHA256_HMAC:
                case RTE_CRYPTO_AUTH_SHA384_HMAC:
                case RTE_CRYPTO_AUTH_SHA512_HMAC:
                        shared_desc_len = cnstr_shdsc_hmac(
                                                cdb->sh_desc, true,
                                                swap, SHR_NEVER, &alginfo_a,
                                                !ses->dir,
                                                ses->digest_length);
                        break;
                case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
                        shared_desc_len = cnstr_shdsc_snow_f9(
                                                cdb->sh_desc, true, swap,
                                                &alginfo_a,
                                                !ses->dir,
                                                ses->digest_length);
                        break;
                case RTE_CRYPTO_AUTH_ZUC_EIA3:
                        shared_desc_len = cnstr_shdsc_zuca(
                                                cdb->sh_desc, true, swap,
                                                &alginfo_a,
                                                !ses->dir,
                                                ses->digest_length);
                        break;
                default:
                        DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
                        return -ENOTSUP;
                }
                break;
        case DPAA_SEC_AEAD:
                alginfo.key = (size_t)ses->aead_key.data;
                alginfo.keylen = ses->aead_key.length;
                alginfo.key_enc_flags = 0;
                alginfo.key_type = RTA_DATA_IMM;
                alginfo.algtype = ses->aead_key.alg;
                alginfo.algmode = ses->aead_key.algmode;

                /* validate the alg only after it has been filled in */
                if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                        DPAA_SEC_ERR("not supported aead alg");
                        return -ENOTSUP;
                }

                if (ses->dir == DIR_ENC)
                        shared_desc_len = cnstr_shdsc_gcm_encap(
                                        cdb->sh_desc, true, swap, SHR_NEVER,
                                        &alginfo,
                                        ses->iv.length,
                                        ses->digest_length);
                else
                        shared_desc_len = cnstr_shdsc_gcm_decap(
                                        cdb->sh_desc, true, swap, SHR_NEVER,
                                        &alginfo,
                                        ses->iv.length,
                                        ses->digest_length);
                break;
        case DPAA_SEC_CIPHER_HASH:
                alginfo_c.key = (size_t)ses->cipher_key.data;
                alginfo_c.keylen = ses->cipher_key.length;
                alginfo_c.key_enc_flags = 0;
                alginfo_c.key_type = RTA_DATA_IMM;
                alginfo_c.algtype = ses->cipher_key.alg;
                alginfo_c.algmode = ses->cipher_key.algmode;

                alginfo_a.key = (size_t)ses->auth_key.data;
                alginfo_a.keylen = ses->auth_key.length;
                alginfo_a.key_enc_flags = 0;
                alginfo_a.key_type = RTA_DATA_IMM;
                alginfo_a.algtype = ses->auth_key.alg;
                alginfo_a.algmode = ses->auth_key.algmode;

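                /* same inline-key query as in dpaa_sec_prep_ipsec_cdb():
                 * sh_desc[0..2] are used as scratch and cleared afterwards
                 */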
                cdb->sh_desc[0] = alginfo_c.keylen;
                cdb->sh_desc[1] = alginfo_a.keylen;
                err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
                                       DESC_JOB_IO_LEN,
                                       (unsigned int *)cdb->sh_desc,
                                       &cdb->sh_desc[2], 2);

                if (err < 0) {
                        DPAA_SEC_ERR("Crypto: Incorrect key lengths");
                        return err;
                }
                if (cdb->sh_desc[2] & 1)
                        alginfo_c.key_type = RTA_DATA_IMM;
                else {
                        alginfo_c.key = (size_t)rte_dpaa_mem_vtop(
                                                (void *)(size_t)alginfo_c.key);
                        alginfo_c.key_type = RTA_DATA_PTR;
                }
                if (cdb->sh_desc[2] & (1<<1))
                        alginfo_a.key_type = RTA_DATA_IMM;
                else {
                        alginfo_a.key = (size_t)rte_dpaa_mem_vtop(
                                                (void *)(size_t)alginfo_a.key);
                        alginfo_a.key_type = RTA_DATA_PTR;
                }
                cdb->sh_desc[0] = 0;
                cdb->sh_desc[1] = 0;
                cdb->sh_desc[2] = 0;
                /* auth_only_len is set to 0 here; it is overwritten in the
                 * FD for each packet.
                 */
                shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
                                true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
                                ses->iv.length,
                                ses->digest_length, ses->dir);
                break;
        case DPAA_SEC_HASH_CIPHER:
        default:
                DPAA_SEC_ERR("error: Unsupported session");
                return -ENOTSUP;
        }

        if (shared_desc_len < 0) {
                DPAA_SEC_ERR("error in preparing command block");
                return shared_desc_len;
        }

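        /* store the shared descriptor length and convert the header words
         * to the big-endian format expected by the SEC block
         */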
        cdb->sh_hdr.hi.field.idlen = shared_desc_len;
        cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
        cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

        return 0;
}

/* the qp is lockless; it must be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
        struct qman_fq *fq;
        unsigned int pkts = 0;
        int num_rx_bufs, ret;
        struct qm_dqrr_entry *dq;
        uint32_t vdqcr_flags = 0;

        fq = &qp->outq;
        /*
         * For requests of fewer than four buffers, set the QM_VDQCR_EXACT
         * flag and ask for exactly the requested number. Otherwise leave
         * QM_VDQCR_EXACT unset; the dequeue may then return up to two more
         * buffers than requested, so ask for two fewer to compensate.
         */
        if (nb_ops < 4) {
                vdqcr_flags = QM_VDQCR_EXACT;
                num_rx_bufs = nb_ops;
        } else {
                num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
                        (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
        }
        ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
        if (ret)
                return 0;

        do {
                const struct qm_fd *fd;
                struct dpaa_sec_job *job;
                struct dpaa_sec_op_ctx *ctx;
                struct rte_crypto_op *op;

                dq = qman_dequeue(fq);
                if (!dq)
                        continue;

                fd = &dq->fd;
                /* the SG table is embedded in the op ctx:
                 * sg[0] is for output,
                 * sg[1] is for input
                 */
                job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

                ctx = container_of(job, struct dpaa_sec_op_ctx, job);
                ctx->fd_status = fd->status;
                op = ctx->op;
                if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
                        struct qm_sg_entry *sg_out;
                        uint32_t len;
                        struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
                                                op->sym->m_src : op->sym->m_dst;

                        sg_out = &job->sg[0];
                        hw_sg_to_cpu(sg_out);
                        len = sg_out->length;
                        mbuf->pkt_len = len;
                        while (mbuf->next != NULL) {
                                len -= mbuf->data_len;
                                mbuf = mbuf->next;
                        }
                        mbuf->data_len = len;
                }
                if (!ctx->fd_status) {
                        op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
                } else {
                        DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
                        op->status = RTE_CRYPTO_OP_STATUS_ERROR;
                }
                ops[pkts++] = op;

                /* report op status to sym->op and then free the ctx memory */
                rte_mempool_put(ctx->ctx_pool, (void *)ctx);

                qman_dqrr_consume(fq, dq);
        } while (fq->flags & QMAN_FQ_STATE_VDQCR);

        return pkts;
}

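/* all the build_* helpers below lay out a QMan compound frame in the
 * per-op context, roughly as follows:
 *
 *    cf->sg[0]  output entry (marked extension when it points to an
 *               output scatter-gather table)
 *    cf->sg[1]  input entry, marked extension and final, pointing at
 *               the input fragments starting at cf->sg[2]
 *    cf->sg[2+] input fragments: IV, [AAD,] payload[, digest]
 *
 * every entry is converted with cpu_to_hw_sg() before the job is handed
 * to the hardware
 */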
static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct rte_mbuf *mbuf = sym->m_src;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        phys_addr_t start_addr;
        uint8_t *old_digest, extra_segs;
        int data_len, data_offset;

        data_len = sym->auth.data.length;
        data_offset = sym->auth.data.offset;

        if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
            ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
                if ((data_len & 7) || (data_offset & 7)) {
                        DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
                        return NULL;
                }

                data_len = data_len >> 3;
                data_offset = data_offset >> 3;
        }

        if (is_decode(ses))
                extra_segs = 3;
        else
                extra_segs = 2;

        if (mbuf->nb_segs > MAX_SG_ENTRIES) {
                DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
                                MAX_SG_ENTRIES);
                return NULL;
        }
        ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;
        old_digest = ctx->digest;

        /* output */
        out_sg = &cf->sg[0];
        qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
        out_sg->length = ses->digest_length;
        cpu_to_hw_sg(out_sg);

        /* input */
        in_sg = &cf->sg[1];
        /* need to extend the input to a compound frame */
        in_sg->extension = 1;
        in_sg->final = 1;
        in_sg->length = data_len;
        qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));

        /* 1st seg */
        sg = in_sg + 1;

        if (ses->iv.length) {
                uint8_t *iv_ptr;

                iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                                                   ses->iv.offset);

                if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
                        iv_ptr = conv_to_snow_f9_iv(iv_ptr);
                        sg->length = 12;
                } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
                        iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
                        sg->length = 8;
                } else {
                        sg->length = ses->iv.length;
                }
                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
                in_sg->length += sg->length;
                cpu_to_hw_sg(sg);
                sg++;
        }

        qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
        sg->offset = data_offset;

        if (data_len <= (mbuf->data_len - data_offset)) {
                sg->length = data_len;
        } else {
                sg->length = mbuf->data_len - data_offset;

                /* remaining i/p segs */
                while ((data_len = data_len - sg->length) &&
                       (mbuf = mbuf->next)) {
                        cpu_to_hw_sg(sg);
                        sg++;
                        qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
                        if (data_len > mbuf->data_len)
                                sg->length = mbuf->data_len;
                        else
                                sg->length = data_len;
                }
        }

        if (is_decode(ses)) {
                /* Digest verification case */
                cpu_to_hw_sg(sg);
                sg++;
                rte_memcpy(old_digest, sym->auth.digest.data,
                                ses->digest_length);
                start_addr = rte_dpaa_mem_vtop(old_digest);
                qm_sg_entry_set64(sg, start_addr);
                sg->length = ses->digest_length;
                in_sg->length += ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);
        cpu_to_hw_sg(in_sg);

        return cf;
}

/**
 * packet looks like:
 *              |<----data_len------->|
 *    |ip_header|ah_header|icv|payload|
 *              ^
 *              |
 *         mbuf->pkt.data
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct rte_mbuf *mbuf = sym->m_src;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *in_sg;
        rte_iova_t start_addr;
        uint8_t *old_digest;
        int data_len, data_offset;

        data_len = sym->auth.data.length;
        data_offset = sym->auth.data.offset;

        if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
            ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
                if ((data_len & 7) || (data_offset & 7)) {
                        DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
                        return NULL;
                }

                data_len = data_len >> 3;
                data_offset = data_offset >> 3;
        }

        ctx = dpaa_sec_alloc_ctx(ses, 4);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;
        old_digest = ctx->digest;

        start_addr = rte_pktmbuf_iova(mbuf);
        /* output */
        sg = &cf->sg[0];
        qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
        sg->length = ses->digest_length;
        cpu_to_hw_sg(sg);

        /* input */
        in_sg = &cf->sg[1];
        /* need to extend the input to a compound frame */
        in_sg->extension = 1;
        in_sg->final = 1;
        in_sg->length = data_len;
        qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
        sg = &cf->sg[2];

        if (ses->iv.length) {
                uint8_t *iv_ptr;

                iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                                                   ses->iv.offset);

                if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
                        iv_ptr = conv_to_snow_f9_iv(iv_ptr);
                        sg->length = 12;
                } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
                        iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
                        sg->length = 8;
                } else {
                        sg->length = ses->iv.length;
                }
                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
                in_sg->length += sg->length;
                cpu_to_hw_sg(sg);
                sg++;
        }

        qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
        sg->offset = data_offset;
        sg->length = data_len;

        if (is_decode(ses)) {
                /* Digest verification case */
                cpu_to_hw_sg(sg);
                /* hash result or digest, save digest first */
                rte_memcpy(old_digest, sym->auth.digest.data,
                                ses->digest_length);
                /* let's check digest by hw */
                start_addr = rte_dpaa_mem_vtop(old_digest);
                sg++;
                qm_sg_entry_set64(sg, start_addr);
                sg->length = ses->digest_length;
                in_sg->length += ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);
        cpu_to_hw_sg(in_sg);

        return cf;
}

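/* cipher-only job for scatter-gather mbufs: the output SG table walks
 * m_dst (or m_src for in-place operation), the input SG table is the IV
 * followed by the payload segments
 */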
static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        struct rte_mbuf *mbuf;
        uint8_t req_segs;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);
        int data_len, data_offset;

        data_len = sym->cipher.data.length;
        data_offset = sym->cipher.data.offset;

        if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
                ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
                if ((data_len & 7) || (data_offset & 7)) {
                        DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
                        return NULL;
                }

                data_len = data_len >> 3;
                data_offset = data_offset >> 3;
        }

        if (sym->m_dst) {
                mbuf = sym->m_dst;
                req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
        } else {
                mbuf = sym->m_src;
                req_segs = mbuf->nb_segs * 2 + 3;
        }
        if (mbuf->nb_segs > MAX_SG_ENTRIES) {
                DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
                                MAX_SG_ENTRIES);
                return NULL;
        }

        ctx = dpaa_sec_alloc_ctx(ses, req_segs);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        /* output */
        out_sg = &cf->sg[0];
        out_sg->extension = 1;
        out_sg->length = data_len;
        qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
        cpu_to_hw_sg(out_sg);

        /* 1st seg */
        sg = &cf->sg[2];
        qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
        sg->length = mbuf->data_len - data_offset;
        sg->offset = data_offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* input */
        mbuf = sym->m_src;
        in_sg = &cf->sg[1];
        in_sg->extension = 1;
        in_sg->final = 1;
        in_sg->length = data_len + ses->iv.length;

        sg++;
        qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
        cpu_to_hw_sg(in_sg);

        /* IV */
        qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        cpu_to_hw_sg(sg);

        /* 1st seg */
        sg++;
        qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
        sg->length = mbuf->data_len - data_offset;
        sg->offset = data_offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        return cf;
}

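/* cipher-only job for contiguous mbufs: a single output entry pointing
 * at the destination buffer and an input compound of IV plus payload
 */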
static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg;
        rte_iova_t src_start_addr, dst_start_addr;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);
        int data_len, data_offset;

        data_len = sym->cipher.data.length;
        data_offset = sym->cipher.data.offset;

        if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
                ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
                if ((data_len & 7) || (data_offset & 7)) {
                        DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
                        return NULL;
                }

                data_len = data_len >> 3;
                data_offset = data_offset >> 3;
        }

        ctx = dpaa_sec_alloc_ctx(ses, 4);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        src_start_addr = rte_pktmbuf_iova(sym->m_src);

        if (sym->m_dst)
                dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
        else
                dst_start_addr = src_start_addr;

        /* output */
        sg = &cf->sg[0];
        qm_sg_entry_set64(sg, dst_start_addr + data_offset);
        sg->length = data_len + ses->iv.length;
        cpu_to_hw_sg(sg);

        /* input */
        sg = &cf->sg[1];

        /* need to extend the input to a compound frame */
        sg->extension = 1;
        sg->final = 1;
        sg->length = data_len + ses->iv.length;
        qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2]));
        cpu_to_hw_sg(sg);

        sg = &cf->sg[2];
        qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        cpu_to_hw_sg(sg);

        sg++;
        qm_sg_entry_set64(sg, src_start_addr + data_offset);
        sg->length = data_len;
        sg->final = 1;
        cpu_to_hw_sg(sg);

        return cf;
}

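/* AEAD (GCM) job for scatter-gather mbufs; the AAD, when present
 * (ses->auth_only_len), gets its own input entry, and on decrypt the
 * received digest is copied to ctx->digest so the hardware can verify it
 */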
static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        struct rte_mbuf *mbuf;
        uint8_t req_segs;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);

        if (sym->m_dst) {
                mbuf = sym->m_dst;
                req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
        } else {
                mbuf = sym->m_src;
                req_segs = mbuf->nb_segs * 2 + 4;
        }

        if (ses->auth_only_len)
                req_segs++;

        if (mbuf->nb_segs > MAX_SG_ENTRIES) {
                DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
                                MAX_SG_ENTRIES);
                return NULL;
        }

        ctx = dpaa_sec_alloc_ctx(ses, req_segs);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        rte_prefetch0(cf->sg);

        /* output */
        out_sg = &cf->sg[0];
        out_sg->extension = 1;
        if (is_encode(ses))
                out_sg->length = sym->aead.data.length + ses->digest_length;
        else
                out_sg->length = sym->aead.data.length;

        /* output sg entries */
        sg = &cf->sg[2];
        qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
        cpu_to_hw_sg(out_sg);

        /* 1st seg */
        qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
        sg->length = mbuf->data_len - sym->aead.data.offset;
        sg->offset = sym->aead.data.offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }
        sg->length -= ses->digest_length;

        if (is_encode(ses)) {
                cpu_to_hw_sg(sg);
                /* set auth output */
                sg++;
                qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
                sg->length = ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* input */
        mbuf = sym->m_src;
        in_sg = &cf->sg[1];
        in_sg->extension = 1;
        in_sg->final = 1;
        if (is_encode(ses))
                in_sg->length = ses->iv.length + sym->aead.data.length
                                                        + ses->auth_only_len;
        else
                in_sg->length = ses->iv.length + sym->aead.data.length
                                + ses->auth_only_len + ses->digest_length;

        /* input sg entries */
        sg++;
        qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
        cpu_to_hw_sg(in_sg);

        /* 1st seg IV */
        qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        cpu_to_hw_sg(sg);

        /* 2nd seg auth only */
        if (ses->auth_only_len) {
                sg++;
                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data));
                sg->length = ses->auth_only_len;
                cpu_to_hw_sg(sg);
        }

        /* 3rd seg */
        sg++;
        qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
        sg->length = mbuf->data_len - sym->aead.data.offset;
        sg->offset = sym->aead.data.offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }

        if (is_decode(ses)) {
                cpu_to_hw_sg(sg);
                sg++;
                memcpy(ctx->digest, sym->aead.digest.data,
                        ses->digest_length);
                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
                sg->length = ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        return cf;
}

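/* AEAD (GCM) job for contiguous mbufs; same layout as the SG variant
 * above, built with direct buffer addresses
 */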
static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg;
        uint32_t length = 0;
        rte_iova_t src_start_addr, dst_start_addr;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);

        src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

        if (sym->m_dst)
                dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
        else
                dst_start_addr = src_start_addr;

        ctx = dpaa_sec_alloc_ctx(ses, 7);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        /* input */
        rte_prefetch0(cf->sg);
        sg = &cf->sg[2];
        qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
        if (is_encode(ses)) {
                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
                sg->length = ses->iv.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                sg++;
                if (ses->auth_only_len) {
                        qm_sg_entry_set64(sg,
                                          rte_dpaa_mem_vtop(sym->aead.aad.data));
                        sg->length = ses->auth_only_len;
                        length += sg->length;
                        cpu_to_hw_sg(sg);
                        sg++;
                }
                qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
                sg->length = sym->aead.data.length;
                length += sg->length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        } else {
                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
                sg->length = ses->iv.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                sg++;
                if (ses->auth_only_len) {
                        qm_sg_entry_set64(sg,
                                          rte_dpaa_mem_vtop(sym->aead.aad.data));
                        sg->length = ses->auth_only_len;
                        length += sg->length;
                        cpu_to_hw_sg(sg);
                        sg++;
                }
                qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
                sg->length = sym->aead.data.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                memcpy(ctx->digest, sym->aead.digest.data,
                       ses->digest_length);
                sg++;

                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
                sg->length = ses->digest_length;
                length += sg->length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        }
        /* input compound frame */
        cf->sg[1].length = length;
        cf->sg[1].extension = 1;
        cf->sg[1].final = 1;
        cpu_to_hw_sg(&cf->sg[1]);

        /* output */
        sg++;
        qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
        qm_sg_entry_set64(sg,
                dst_start_addr + sym->aead.data.offset);
        sg->length = sym->aead.data.length;
        length = sg->length;
        if (is_encode(ses)) {
                cpu_to_hw_sg(sg);
                /* set auth output */
                sg++;
                qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
                sg->length = ses->digest_length;
                length += sg->length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* output compound frame */
        cf->sg[0].length = length;
        cf->sg[0].extension = 1;
        cpu_to_hw_sg(&cf->sg[0]);

        return cf;
}

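/* chained cipher+auth job for scatter-gather mbufs: the input covers the
 * IV plus the auth range, the output carries the cipher result and, on
 * encrypt, the digest
 */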
1347static inline struct dpaa_sec_job *
1348build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1349{
1350        struct rte_crypto_sym_op *sym = op->sym;
1351        struct dpaa_sec_job *cf;
1352        struct dpaa_sec_op_ctx *ctx;
1353        struct qm_sg_entry *sg, *out_sg, *in_sg;
1354        struct rte_mbuf *mbuf;
1355        uint8_t req_segs;
1356        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1357                        ses->iv.offset);
1358
1359        if (sym->m_dst) {
1360                mbuf = sym->m_dst;
1361                req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1362        } else {
1363                mbuf = sym->m_src;
1364                req_segs = mbuf->nb_segs * 2 + 4;
1365        }
1366
1367        if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1368                DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1369                                MAX_SG_ENTRIES);
1370                return NULL;
1371        }
1372
1373        ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1374        if (!ctx)
1375                return NULL;
1376
1377        cf = &ctx->job;
1378        ctx->op = op;
1379
1380        rte_prefetch0(cf->sg);
1381
1382        /* output */
1383        out_sg = &cf->sg[0];
1384        out_sg->extension = 1;
1385        if (is_encode(ses))
1386                out_sg->length = sym->auth.data.length + ses->digest_length;
1387        else
1388                out_sg->length = sym->auth.data.length;
1389
1390        /* output sg entries */
1391        sg = &cf->sg[2];
1392        qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
1393        cpu_to_hw_sg(out_sg);
1394
1395        /* 1st seg */
1396        qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1397        sg->length = mbuf->data_len - sym->auth.data.offset;
1398        sg->offset = sym->auth.data.offset;
1399
1400        /* Successive segs */
1401        mbuf = mbuf->next;
1402        while (mbuf) {
1403                cpu_to_hw_sg(sg);
1404                sg++;
1405                qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1406                sg->length = mbuf->data_len;
1407                mbuf = mbuf->next;
1408        }
1409        sg->length -= ses->digest_length;
1410
1411        if (is_encode(ses)) {
1412                cpu_to_hw_sg(sg);
1413                /* set auth output */
1414                sg++;
1415                qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1416                sg->length = ses->digest_length;
1417        }
1418        sg->final = 1;
1419        cpu_to_hw_sg(sg);
1420
1421        /* input */
1422        mbuf = sym->m_src;
1423        in_sg = &cf->sg[1];
1424        in_sg->extension = 1;
1425        in_sg->final = 1;
1426        if (is_encode(ses))
1427                in_sg->length = ses->iv.length + sym->auth.data.length;
1428        else
1429                in_sg->length = ses->iv.length + sym->auth.data.length
1430                                                + ses->digest_length;
1431
1432        /* input sg entries */
1433        sg++;
1434        qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1435        cpu_to_hw_sg(in_sg);
1436
1437        /* 1st seg IV */
1438        qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1439        sg->length = ses->iv.length;
1440        cpu_to_hw_sg(sg);
1441
1442        /* 2nd seg */
1443        sg++;
1444        qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1445        sg->length = mbuf->data_len - sym->auth.data.offset;
1446        sg->offset = sym->auth.data.offset;
1447
1448        /* Successive segs */
1449        mbuf = mbuf->next;
1450        while (mbuf) {
1451                cpu_to_hw_sg(sg);
1452                sg++;
1453                qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1454                sg->length = mbuf->data_len;
1455                mbuf = mbuf->next;
1456        }
1457
1458        sg->length -= ses->digest_length;
1459        if (is_decode(ses)) {
1460                cpu_to_hw_sg(sg);
1461                sg++;
1462                memcpy(ctx->digest, sym->auth.digest.data,
1463                        ses->digest_length);
1464                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1465                sg->length = ses->digest_length;
1466        }
1467        sg->final = 1;
1468        cpu_to_hw_sg(sg);
1469
1470        return cf;
1471}
1472
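/*
 * Note on the compound-frame layout built by these helpers (summarized
 * here from the code, as an aid): cf->sg[0] is the output entry and
 * cf->sg[1] the input entry of the compound frame; when marked with
 * extension = 1 they point (via rte_dpaa_mem_vtop()) to the
 * scatter-gather table that starts at cf->sg[2], whose last entry
 * carries final = 1. Every entry is converted with cpu_to_hw_sg()
 * before the FD is handed to SEC.
 */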
1473static inline struct dpaa_sec_job *
1474build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1475{
1476        struct rte_crypto_sym_op *sym = op->sym;
1477        struct dpaa_sec_job *cf;
1478        struct dpaa_sec_op_ctx *ctx;
1479        struct qm_sg_entry *sg;
1480        rte_iova_t src_start_addr, dst_start_addr;
1481        uint32_t length = 0;
1482        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1483                        ses->iv.offset);
1484
1485        src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1486        if (sym->m_dst)
1487                dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1488        else
1489                dst_start_addr = src_start_addr;
1490
1491        ctx = dpaa_sec_alloc_ctx(ses, 7);
1492        if (!ctx)
1493                return NULL;
1494
1495        cf = &ctx->job;
1496        ctx->op = op;
1497
1498        /* input */
1499        rte_prefetch0(cf->sg);
1500        sg = &cf->sg[2];
1501        qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
1502        if (is_encode(ses)) {
1503                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1504                sg->length = ses->iv.length;
1505                length += sg->length;
1506                cpu_to_hw_sg(sg);
1507
1508                sg++;
1509                qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1510                sg->length = sym->auth.data.length;
1511                length += sg->length;
1512                sg->final = 1;
1513                cpu_to_hw_sg(sg);
1514        } else {
1515                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1516                sg->length = ses->iv.length;
1517                length += sg->length;
1518                cpu_to_hw_sg(sg);
1519
1520                sg++;
1521
1522                qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1523                sg->length = sym->auth.data.length;
1524                length += sg->length;
1525                cpu_to_hw_sg(sg);
1526
1527                memcpy(ctx->digest, sym->auth.digest.data,
1528                       ses->digest_length);
1529                sg++;
1530
1531                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1532                sg->length = ses->digest_length;
1533                length += sg->length;
1534                sg->final = 1;
1535                cpu_to_hw_sg(sg);
1536        }
1537        /* input compound frame */
1538        cf->sg[1].length = length;
1539        cf->sg[1].extension = 1;
1540        cf->sg[1].final = 1;
1541        cpu_to_hw_sg(&cf->sg[1]);
1542
1543        /* output */
1544        sg++;
1545        qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
1546        qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1547        sg->length = sym->cipher.data.length;
1548        length = sg->length;
1549        if (is_encode(ses)) {
1550                cpu_to_hw_sg(sg);
1551                /* set auth output */
1552                sg++;
1553                qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1554                sg->length = ses->digest_length;
1555                length += sg->length;
1556        }
1557        sg->final = 1;
1558        cpu_to_hw_sg(sg);
1559
1560        /* output compound frame */
1561        cf->sg[0].length = length;
1562        cf->sg[0].extension = 1;
1563        cpu_to_hw_sg(&cf->sg[0]);
1564
1565        return cf;
1566}
1567
1568#ifdef RTE_LIB_SECURITY
1569static inline struct dpaa_sec_job *
1570build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1571{
1572        struct rte_crypto_sym_op *sym = op->sym;
1573        struct dpaa_sec_job *cf;
1574        struct dpaa_sec_op_ctx *ctx;
1575        struct qm_sg_entry *sg;
1576        phys_addr_t src_start_addr, dst_start_addr;
1577
1578        ctx = dpaa_sec_alloc_ctx(ses, 2);
1579        if (!ctx)
1580                return NULL;
1581        cf = &ctx->job;
1582        ctx->op = op;
1583
1584        src_start_addr = rte_pktmbuf_iova(sym->m_src);
1585
1586        if (sym->m_dst)
1587                dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1588        else
1589                dst_start_addr = src_start_addr;
1590
1591        /* input */
1592        sg = &cf->sg[1];
1593        qm_sg_entry_set64(sg, src_start_addr);
1594        sg->length = sym->m_src->pkt_len;
1595        sg->final = 1;
1596        cpu_to_hw_sg(sg);
1597
1598        sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1599        /* output */
1600        sg = &cf->sg[0];
1601        qm_sg_entry_set64(sg, dst_start_addr);
1602        sg->length = sym->m_src->buf_len - sym->m_src->data_off;
1603        cpu_to_hw_sg(sg);
1604
1605        return cf;
1606}
1607
1608static inline struct dpaa_sec_job *
1609build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1610{
1611        struct rte_crypto_sym_op *sym = op->sym;
1612        struct dpaa_sec_job *cf;
1613        struct dpaa_sec_op_ctx *ctx;
1614        struct qm_sg_entry *sg, *out_sg, *in_sg;
1615        struct rte_mbuf *mbuf;
1616        uint8_t req_segs;
1617        uint32_t in_len = 0, out_len = 0;
1618
1619        if (sym->m_dst)
1620                mbuf = sym->m_dst;
1621        else
1622                mbuf = sym->m_src;
1623
1624        req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
1625        if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1626                DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
1627                                MAX_SG_ENTRIES);
1628                return NULL;
1629        }
1630
1631        ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1632        if (!ctx)
1633                return NULL;
1634        cf = &ctx->job;
1635        ctx->op = op;
1636        /* output */
1637        out_sg = &cf->sg[0];
1638        out_sg->extension = 1;
1639        qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1640
1641        /* 1st seg */
1642        sg = &cf->sg[2];
1643        qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1644        sg->offset = 0;
1645
1646        /* Successive segs */
1647        while (mbuf->next) {
1648                sg->length = mbuf->data_len;
1649                out_len += sg->length;
1650                mbuf = mbuf->next;
1651                cpu_to_hw_sg(sg);
1652                sg++;
1653                qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1654                sg->offset = 0;
1655        }
1656        sg->length = mbuf->buf_len - mbuf->data_off;
1657        out_len += sg->length;
1658        sg->final = 1;
1659        cpu_to_hw_sg(sg);
1660
1661        out_sg->length = out_len;
1662        cpu_to_hw_sg(out_sg);
1663
1664        /* input */
1665        mbuf = sym->m_src;
1666        in_sg = &cf->sg[1];
1667        in_sg->extension = 1;
1668        in_sg->final = 1;
1669        in_len = mbuf->data_len;
1670
1671        sg++;
1672        qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1673
1674        /* 1st seg */
1675        qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1676        sg->length = mbuf->data_len;
1677        sg->offset = 0;
1678
1679        /* Successive segs */
1680        mbuf = mbuf->next;
1681        while (mbuf) {
1682                cpu_to_hw_sg(sg);
1683                sg++;
1684                qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
1685                sg->length = mbuf->data_len;
1686                sg->offset = 0;
1687                in_len += sg->length;
1688                mbuf = mbuf->next;
1689        }
1690        sg->final = 1;
1691        cpu_to_hw_sg(sg);
1692
1693        in_sg->length = in_len;
1694        cpu_to_hw_sg(in_sg);
1695
1696        sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1697
1698        return cf;
1699}
1700#endif
1701
1702static uint16_t
1703dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1704                       uint16_t nb_ops)
1705{
1706        /* Transmit the frames to the given device and queue pair */
1707        uint32_t loop;
1708        struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1709        uint16_t num_tx = 0;
            const uint16_t nb_ops_in = nb_ops; /* requested count, for stats */
1710        struct qm_fd fds[DPAA_SEC_BURST], *fd;
1711        uint32_t frames_to_send;
1712        struct rte_crypto_op *op;
1713        struct dpaa_sec_job *cf;
1714        dpaa_sec_session *ses;
1715        uint16_t auth_hdr_len, auth_tail_len;
1716        uint32_t index, flags[DPAA_SEC_BURST] = {0};
1717        struct qman_fq *inq[DPAA_SEC_BURST];
1718
1719        while (nb_ops) {
1720                frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1721                                DPAA_SEC_BURST : nb_ops;
1722                for (loop = 0; loop < frames_to_send; loop++) {
1723                        op = *(ops++);
1724                        if (*dpaa_seqn(op->sym->m_src) != 0) {
1725                                index = *dpaa_seqn(op->sym->m_src) - 1;
1726                                if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
1727                                        /* QM_EQCR_DCA_IDXMASK = 0x0f */
1728                                        flags[loop] = ((index & 0x0f) << 8);
1729                                        flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
1730                                        DPAA_PER_LCORE_DQRR_SIZE--;
1731                                        DPAA_PER_LCORE_DQRR_HELD &=
1732                                                                ~(1 << index);
1733                                }
1734                        }
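                        /*
                         * Sketch of intent (encoding taken from the lines
                         * above): with QMAN_ENQUEUE_FLAG_DCA set, this
                         * enqueue also issues a Discrete Consumption
                         * Acknowledgement for the held DQRR entry whose
                         * index is coded into bits 8..11 of the flags, so
                         * the dequeued frame is released without a separate
                         * consume operation.
                         */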
1735
1736                        switch (op->sess_type) {
1737                        case RTE_CRYPTO_OP_WITH_SESSION:
1738                                ses = (dpaa_sec_session *)
1739                                        get_sym_session_private_data(
1740                                                        op->sym->session,
1741                                                        cryptodev_driver_id);
1742                                break;
1743#ifdef RTE_LIB_SECURITY
1744                        case RTE_CRYPTO_OP_SECURITY_SESSION:
1745                                ses = (dpaa_sec_session *)
1746                                        get_sec_session_private_data(
1747                                                        op->sym->sec_session);
1748                                break;
1749#endif
1750                        default:
1751                                DPAA_SEC_DP_ERR(
1752                                        "sessionless crypto op not supported");
1753                                frames_to_send = loop;
1754                                nb_ops = loop;
1755                                goto send_pkts;
1756                        }
1757
1758                        if (!ses) {
1759                                DPAA_SEC_DP_ERR("session not available");
1760                                frames_to_send = loop;
1761                                nb_ops = loop;
1762                                goto send_pkts;
1763                        }
1764
1765                        if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
1766                                if (dpaa_sec_attach_sess_q(qp, ses)) {
1767                                        frames_to_send = loop;
1768                                        nb_ops = loop;
1769                                        goto send_pkts;
1770                                }
1771                        } else if (unlikely(ses->qp[rte_lcore_id() %
1772                                                MAX_DPAA_CORES] != qp)) {
1773                                DPAA_SEC_DP_ERR("Old: sess->qp = %p,"
1774                                        " New: qp = %p\n",
1775                                        ses->qp[rte_lcore_id() %
1776                                        MAX_DPAA_CORES], qp);
1777                                frames_to_send = loop;
1778                                nb_ops = loop;
1779                                goto send_pkts;
1780                        }
1781
1782                        auth_hdr_len = op->sym->auth.data.length -
1783                                                op->sym->cipher.data.length;
1784                        auth_tail_len = 0;
1785
1786                        if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
1787                                  ((op->sym->m_dst == NULL) ||
1788                                   rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1789                                switch (ses->ctxt) {
1790#ifdef RTE_LIB_SECURITY
1791                                case DPAA_SEC_PDCP:
1792                                case DPAA_SEC_IPSEC:
1793                                        cf = build_proto(op, ses);
1794                                        break;
1795#endif
1796                                case DPAA_SEC_AUTH:
1797                                        cf = build_auth_only(op, ses);
1798                                        break;
1799                                case DPAA_SEC_CIPHER:
1800                                        cf = build_cipher_only(op, ses);
1801                                        break;
1802                                case DPAA_SEC_AEAD:
1803                                        cf = build_cipher_auth_gcm(op, ses);
1804                                        auth_hdr_len = ses->auth_only_len;
1805                                        break;
1806                                case DPAA_SEC_CIPHER_HASH:
1807                                        auth_hdr_len =
1808                                                op->sym->cipher.data.offset
1809                                                - op->sym->auth.data.offset;
1810                                        auth_tail_len =
1811                                                op->sym->auth.data.length
1812                                                - op->sym->cipher.data.length
1813                                                - auth_hdr_len;
1814                                        cf = build_cipher_auth(op, ses);
1815                                        break;
1816                                default:
1817                                        DPAA_SEC_DP_ERR("operation not supported");
1818                                        frames_to_send = loop;
1819                                        nb_ops = loop;
1820                                        goto send_pkts;
1821                                }
1822                        } else {
1823                                switch (ses->ctxt) {
1824#ifdef RTE_LIB_SECURITY
1825                                case DPAA_SEC_PDCP:
1826                                case DPAA_SEC_IPSEC:
1827                                        cf = build_proto_sg(op, ses);
1828                                        break;
1829#endif
1830                                case DPAA_SEC_AUTH:
1831                                        cf = build_auth_only_sg(op, ses);
1832                                        break;
1833                                case DPAA_SEC_CIPHER:
1834                                        cf = build_cipher_only_sg(op, ses);
1835                                        break;
1836                                case DPAA_SEC_AEAD:
1837                                        cf = build_cipher_auth_gcm_sg(op, ses);
1838                                        auth_hdr_len = ses->auth_only_len;
1839                                        break;
1840                                case DPAA_SEC_CIPHER_HASH:
1841                                        auth_hdr_len =
1842                                                op->sym->cipher.data.offset
1843                                                - op->sym->auth.data.offset;
1844                                        auth_tail_len =
1845                                                op->sym->auth.data.length
1846                                                - op->sym->cipher.data.length
1847                                                - auth_hdr_len;
1848                                        cf = build_cipher_auth_sg(op, ses);
1849                                        break;
1850                                default:
1851                                        DPAA_SEC_DP_ERR("operation not supported");
1852                                        frames_to_send = loop;
1853                                        nb_ops = loop;
1854                                        goto send_pkts;
1855                                }
1856                        }
1857                        if (unlikely(!cf)) {
1858                                frames_to_send = loop;
1859                                nb_ops = loop;
1860                                goto send_pkts;
1861                        }
1862
1863                        fd = &fds[loop];
1864                        inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
1865                        fd->opaque_addr = 0;
1866                        fd->cmd = 0;
1867                        qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
1868                        fd->_format1 = qm_fd_compound;
1869                        fd->length29 = 2 * sizeof(struct qm_sg_entry);
1870
1871                        /* auth_only_len is set to 0 in the descriptor;
1872                         * it is overridden here through fd->cmd, which
1873                         * updates the DPOVRD register.
1874                         */
1875                        if (auth_hdr_len || auth_tail_len) {
1876                                fd->cmd = 0x80000000;
1877                                fd->cmd |=
1878                                        ((auth_tail_len << 16) | auth_hdr_len);
1879                        }
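                        /*
                         * Worked example (values are illustrative): an ESP
                         * packet whose 8-byte header is authenticated but
                         * not encrypted gives auth_hdr_len = 8 and
                         * auth_tail_len = 0, so fd->cmd becomes
                         * 0x80000000 | (0 << 16) | 8 = 0x80000008.
                         */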
1880
1881#ifdef RTE_LIB_SECURITY
1882                        /* For PDCP, the per-packet HFN is stored in the
1883                         * mbuf private area, after the sym op.
1884                         */
1885                        if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
1886                                fd->cmd = 0x80000000 |
1887                                        *((uint32_t *)((uint8_t *)op +
1888                                        ses->pdcp.hfn_ovd_offset));
1889                                DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
1890                                        *((uint32_t *)((uint8_t *)op +
1891                                        ses->pdcp.hfn_ovd_offset)),
1892                                        ses->pdcp.hfn_ovd);
1893                        }
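                        /*
                         * Application-side sketch (hypothetical code, not
                         * part of this driver): with hfn_ovd enabled, the
                         * caller stores the per-packet HFN at the
                         * configured offset before enqueueing:
                         *
                         *   *(uint32_t *)((uint8_t *)op +
                         *           ses->pdcp.hfn_ovd_offset) = hfn;
                         */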
1894#endif
1895                }
1896send_pkts:
1897                loop = 0;
1898                while (loop < frames_to_send) {
1899                        loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
1900                                        &flags[loop], frames_to_send - loop);
1901                }
1902                nb_ops -= frames_to_send;
1903                num_tx += frames_to_send;
1904        }
1905
1906        dpaa_qp->tx_pkts += num_tx;
1907        dpaa_qp->tx_errs += nb_ops_in - num_tx;
1908
1909        return num_tx;
1910}
1911
1912static uint16_t
1913dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1914                       uint16_t nb_ops)
1915{
1916        uint16_t num_rx;
1917        struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1918
1919        num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
1920
1921        dpaa_qp->rx_pkts += num_rx;
1922        dpaa_qp->rx_errs += nb_ops - num_rx;
1923
1924        DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
1925
1926        return num_rx;
1927}
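/*
 * Usage sketch (hypothetical application code, not part of this file):
 * ops reach the two burst handlers above through the generic cryptodev
 * API.
 *
 *	uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, n);
 *	uint16_t done = 0;
 *
 *	while (done < sent)
 *		done += rte_cryptodev_dequeue_burst(dev_id, qp_id,
 *				&deq_ops[done], sent - done);
 */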
1928
1929/** Release queue pair */
1930static int
1931dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1932                            uint16_t qp_id)
1933{
1934        struct dpaa_sec_dev_private *internals;
1935        struct dpaa_sec_qp *qp = NULL;
1936
1937        PMD_INIT_FUNC_TRACE();
1938
1939        DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
1940
1941        internals = dev->data->dev_private;
1942        if (qp_id >= internals->max_nb_queue_pairs) {
1943                DPAA_SEC_ERR("Max supported qpid %d",
1944                             internals->max_nb_queue_pairs);
1945                return -EINVAL;
1946        }
1947
1948        qp = &internals->qps[qp_id];
1949        rte_mempool_free(qp->ctx_pool);
            qp->ctx_pool = NULL; /* allow a later setup to recreate the pool */
1950        qp->internals = NULL;
1951        dev->data->queue_pairs[qp_id] = NULL;
1952
1953        return 0;
1954}
1955
1956/** Setup a queue pair */
1957static int
1958dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1959                __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1960                __rte_unused int socket_id)
1961{
1962        struct dpaa_sec_dev_private *internals;
1963        struct dpaa_sec_qp *qp = NULL;
1964        char str[20];
1965
1966        DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
1967
1968        internals = dev->data->dev_private;
1969        if (qp_id >= internals->max_nb_queue_pairs) {
1970                DPAA_SEC_ERR("Max supported qpid %d",
1971                             internals->max_nb_queue_pairs);
1972                return -EINVAL;
1973        }
1974
1975        qp = &internals->qps[qp_id];
1976        qp->internals = internals;
1977        snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
1978                        dev->data->dev_id, qp_id);
1979        if (!qp->ctx_pool) {
1980                qp->ctx_pool = rte_mempool_create((const char *)str,
1981                                                        CTX_POOL_NUM_BUFS,
1982                                                        CTX_POOL_BUF_SIZE,
1983                                                        CTX_POOL_CACHE_SIZE, 0,
1984                                                        NULL, NULL, NULL, NULL,
1985                                                        SOCKET_ID_ANY, 0);
1986                if (!qp->ctx_pool) {
1987                        DPAA_SEC_ERR("%s create failed\n", str);
1988                        return -ENOMEM;
1989                }
1990        } else
1991                DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
1992                                dev->data->dev_id, qp_id);
1993        dev->data->queue_pairs[qp_id] = qp;
1994
1995        return 0;
1996}
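/*
 * Usage sketch (hypothetical application code): configuring one queue
 * pair on this PMD through the public API. The field values are
 * assumptions for illustration; note that the handler above ignores
 * qp_conf and socket_id.
 *
 *	struct rte_cryptodev_qp_conf qp_conf = {
 *		.nb_descriptors = 2048,
 *		.mp_session = sess_mp,
 *		.mp_session_private = sess_priv_mp,
 *	};
 *
 *	rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf, rte_socket_id());
 */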
1997
1998/** Returns the size of session structure */
1999static unsigned int
2000dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
2001{
2002        PMD_INIT_FUNC_TRACE();
2003
2004        return sizeof(dpaa_sec_session);
2005}
2006
2007static int
2008dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
2009                     struct rte_crypto_sym_xform *xform,
2010                     dpaa_sec_session *session)
2011{
2012        session->ctxt = DPAA_SEC_CIPHER;
2013        session->cipher_alg = xform->cipher.algo;
2014        session->iv.length = xform->cipher.iv.length;
2015        session->iv.offset = xform->cipher.iv.offset;
2016        session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
2017                                               RTE_CACHE_LINE_SIZE);
2018        if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
2019                DPAA_SEC_ERR("No Memory for cipher key");
2020                return -ENOMEM;
2021        }
2022        session->cipher_key.length = xform->cipher.key.length;
2023
2024        memcpy(session->cipher_key.data, xform->cipher.key.data,
2025               xform->cipher.key.length);
2026        switch (xform->cipher.algo) {
2027        case RTE_CRYPTO_CIPHER_AES_CBC:
2028                session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2029                session->cipher_key.algmode = OP_ALG_AAI_CBC;
2030                break;
2031        case RTE_CRYPTO_CIPHER_3DES_CBC:
2032                session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2033                session->cipher_key.algmode = OP_ALG_AAI_CBC;
2034                break;
2035        case RTE_CRYPTO_CIPHER_AES_CTR:
2036                session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2037                session->cipher_key.algmode = OP_ALG_AAI_CTR;
2038                break;
2039        case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2040                session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
2041                break;
2042        case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2043                session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
2044                break;
2045        default:
2046                DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2047                              xform->cipher.algo);
2048                return -ENOTSUP;
2049        }
2050        session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2051                        DIR_ENC : DIR_DEC;
2052
2053        return 0;
2054}
2055
2056static int
2057dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
2058                   struct rte_crypto_sym_xform *xform,
2059                   dpaa_sec_session *session)
2060{
2061        session->ctxt = DPAA_SEC_AUTH;
2062        session->auth_alg = xform->auth.algo;
2063        session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
2064                                             RTE_CACHE_LINE_SIZE);
2065        if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
2066                DPAA_SEC_ERR("No Memory for auth key");
2067                return -ENOMEM;
2068        }
2069        session->auth_key.length = xform->auth.key.length;
2070        session->digest_length = xform->auth.digest_length;
2071        if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
2072                session->iv.offset = xform->auth.iv.offset;
2073                session->iv.length = xform->auth.iv.length;
2074        }
2075
2076        memcpy(session->auth_key.data, xform->auth.key.data,
2077               xform->auth.key.length);
2078
2079        switch (xform->auth.algo) {
2080        case RTE_CRYPTO_AUTH_SHA1_HMAC:
2081                session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2082                session->auth_key.algmode = OP_ALG_AAI_HMAC;
2083                break;
2084        case RTE_CRYPTO_AUTH_MD5_HMAC:
2085                session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2086                session->auth_key.algmode = OP_ALG_AAI_HMAC;
2087                break;
2088        case RTE_CRYPTO_AUTH_SHA224_HMAC:
2089                session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2090                session->auth_key.algmode = OP_ALG_AAI_HMAC;
2091                break;
2092        case RTE_CRYPTO_AUTH_SHA256_HMAC:
2093                session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2094                session->auth_key.algmode = OP_ALG_AAI_HMAC;
2095                break;
2096        case RTE_CRYPTO_AUTH_SHA384_HMAC:
2097                session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2098                session->auth_key.algmode = OP_ALG_AAI_HMAC;
2099                break;
2100        case RTE_CRYPTO_AUTH_SHA512_HMAC:
2101                session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2102                session->auth_key.algmode = OP_ALG_AAI_HMAC;
2103                break;
2104        case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2105                session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
2106                session->auth_key.algmode = OP_ALG_AAI_F9;
2107                break;
2108        case RTE_CRYPTO_AUTH_ZUC_EIA3:
2109                session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
2110                session->auth_key.algmode = OP_ALG_AAI_F9;
2111                break;
2112        default:
2113                DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2114                              xform->auth.algo);
2115                return -ENOTSUP;
2116        }
2117
2118        session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2119                        DIR_ENC : DIR_DEC;
2120
2121        return 0;
2122}
2123
2124static int
2125dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
2126                   struct rte_crypto_sym_xform *xform,
2127                   dpaa_sec_session *session)
2128{
2130        struct rte_crypto_cipher_xform *cipher_xform;
2131        struct rte_crypto_auth_xform *auth_xform;
2132
2133        session->ctxt = DPAA_SEC_CIPHER_HASH;
2134        if (session->auth_cipher_text) {
2135                cipher_xform = &xform->cipher;
2136                auth_xform = &xform->next->auth;
2137        } else {
2138                cipher_xform = &xform->next->cipher;
2139                auth_xform = &xform->auth;
2140        }
2141
2142        /* Set IV parameters */
2143        session->iv.offset = cipher_xform->iv.offset;
2144        session->iv.length = cipher_xform->iv.length;
2145
2146        session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2147                                               RTE_CACHE_LINE_SIZE);
2148        if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2149                DPAA_SEC_ERR("No Memory for cipher key");
2150                return -ENOMEM;
2151        }
2152        session->cipher_key.length = cipher_xform->key.length;
2153        session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2154                                             RTE_CACHE_LINE_SIZE);
2155        if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2156                DPAA_SEC_ERR("No Memory for auth key");
2157                return -ENOMEM;
2158        }
2159        session->auth_key.length = auth_xform->key.length;
2160        memcpy(session->cipher_key.data, cipher_xform->key.data,
2161               cipher_xform->key.length);
2162        memcpy(session->auth_key.data, auth_xform->key.data,
2163               auth_xform->key.length);
2164
2165        session->digest_length = auth_xform->digest_length;
2166        session->auth_alg = auth_xform->algo;
2167
2168        switch (auth_xform->algo) {
2169        case RTE_CRYPTO_AUTH_SHA1_HMAC:
2170                session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2171                session->auth_key.algmode = OP_ALG_AAI_HMAC;
2172                break;
2173        case RTE_CRYPTO_AUTH_MD5_HMAC:
2174                session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2175                session->auth_key.algmode = OP_ALG_AAI_HMAC;
2176                break;
2177        case RTE_CRYPTO_AUTH_SHA224_HMAC:
2178                session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2179                session->auth_key.algmode = OP_ALG_AAI_HMAC;
2180                break;
2181        case RTE_CRYPTO_AUTH_SHA256_HMAC:
2182                session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2183                session->auth_key.algmode = OP_ALG_AAI_HMAC;
2184                break;
2185        case RTE_CRYPTO_AUTH_SHA384_HMAC:
2186                session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2187                session->auth_key.algmode = OP_ALG_AAI_HMAC;
2188                break;
2189        case RTE_CRYPTO_AUTH_SHA512_HMAC:
2190                session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2191                session->auth_key.algmode = OP_ALG_AAI_HMAC;
2192                break;
2193        default:
2194                DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2195                              auth_xform->algo);
2196                return -ENOTSUP;
2197        }
2198
2199        session->cipher_alg = cipher_xform->algo;
2200
2201        switch (cipher_xform->algo) {
2202        case RTE_CRYPTO_CIPHER_AES_CBC:
2203                session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2204                session->cipher_key.algmode = OP_ALG_AAI_CBC;
2205                break;
2206        case RTE_CRYPTO_CIPHER_3DES_CBC:
2207                session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2208                session->cipher_key.algmode = OP_ALG_AAI_CBC;
2209                break;
2210        case RTE_CRYPTO_CIPHER_AES_CTR:
2211                session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2212                session->cipher_key.algmode = OP_ALG_AAI_CTR;
2213                break;
2214        default:
2215                DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2216                              cipher_xform->algo);
2217                return -ENOTSUP;
2218        }
2219        session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2220                                DIR_ENC : DIR_DEC;
2221        return 0;
2222}
2223
2224static int
2225dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
2226                   struct rte_crypto_sym_xform *xform,
2227                   dpaa_sec_session *session)
2228{
2229        session->aead_alg = xform->aead.algo;
2230        session->ctxt = DPAA_SEC_AEAD;
2231        session->iv.length = xform->aead.iv.length;
2232        session->iv.offset = xform->aead.iv.offset;
2233        session->auth_only_len = xform->aead.aad_length;
2234        session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
2235                                             RTE_CACHE_LINE_SIZE);
2236        if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
2237                DPAA_SEC_ERR("No Memory for aead key\n");
2238                return -ENOMEM;
2239        }
2240        session->aead_key.length = xform->aead.key.length;
2241        session->digest_length = xform->aead.digest_length;
2242
2243        memcpy(session->aead_key.data, xform->aead.key.data,
2244               xform->aead.key.length);
2245
2246        switch (session->aead_alg) {
2247        case RTE_CRYPTO_AEAD_AES_GCM:
2248                session->aead_key.alg = OP_ALG_ALGSEL_AES;
2249                session->aead_key.algmode = OP_ALG_AAI_GCM;
2250                break;
2251        default:
2252                DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
2253                return -ENOTSUP;
2254        }
2255
2256        session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2257                        DIR_ENC : DIR_DEC;
2258
2259        return 0;
2260}
2261
2262static struct qman_fq *
2263dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
2264{
2265        unsigned int i;
2266
2267        for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2268                if (qi->inq_attach[i] == 0) {
2269                        qi->inq_attach[i] = 1;
2270                        return &qi->inq[i];
2271                }
2272        }
2273        DPAA_SEC_WARN("All sessions in use, max supported: %u",
                          qi->max_nb_sessions);
2274
2275        return NULL;
2276}
2277
2278static int
2279dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
2280{
2281        unsigned int i;
2282
2283        for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2284                if (&qi->inq[i] == fq) {
2285                        if (qman_retire_fq(fq, NULL) != 0)
2286                                DPAA_SEC_DEBUG("Queue is not retired\n");
2287                        qman_oos_fq(fq);
2288                        qi->inq_attach[i] = 0;
2289                        return 0;
2290                }
2291        }
2292        return -1;
2293}
2294
2295static int
2296dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
2297{
2298        int ret;
2299
2300        sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
2301        ret = dpaa_sec_prep_cdb(sess);
2302        if (ret) {
2303                DPAA_SEC_ERR("Unable to prepare sec cdb");
2304                return ret;
2305        }
2306        if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
2307                ret = rte_dpaa_portal_init((void *)0);
2308                if (ret) {
2309                        DPAA_SEC_ERR("Failure in affining portal");
2310                        return ret;
2311                }
2312        }
2313        ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
2314                               rte_dpaa_mem_vtop(&sess->cdb),
2315                               qman_fq_fqid(&qp->outq));
2316        if (ret)
2317                DPAA_SEC_ERR("Unable to init sec queue");
2318
2319        return ret;
2320}
2321
2322static inline void
2323free_session_data(dpaa_sec_session *s)
2324{
2325        if (is_aead(s))
2326                rte_free(s->aead_key.data);
2327        else {
2328                rte_free(s->auth_key.data);
2329                rte_free(s->cipher_key.data);
2330        }
2331        memset(s, 0, sizeof(dpaa_sec_session));
2332}
2333
2334static int
2335dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
2336                            struct rte_crypto_sym_xform *xform, void *sess)
2337{
2338        struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2339        dpaa_sec_session *session = sess;
2340        uint32_t i;
2341        int ret;
2342
2343        PMD_INIT_FUNC_TRACE();
2344
2345        if (unlikely(sess == NULL)) {
2346                DPAA_SEC_ERR("invalid session struct");
2347                return -EINVAL;
2348        }
2349        memset(session, 0, sizeof(dpaa_sec_session));
2350
2351        /* Default IV length = 0 */
2352        session->iv.length = 0;
2353
2354        /* Cipher Only */
2355        if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2356                session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2357                ret = dpaa_sec_cipher_init(dev, xform, session);
2358
2359        /* Authentication Only */
2360        } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2361                   xform->next == NULL) {
2362                session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2363                session->ctxt = DPAA_SEC_AUTH;
2364                ret = dpaa_sec_auth_init(dev, xform, session);
2365
2366        /* Cipher then Authenticate */
2367        } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2368                   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2369                if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
2370                        session->auth_cipher_text = 1;
2371                        if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2372                                ret = dpaa_sec_auth_init(dev, xform, session);
2373                        else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2374                                ret = dpaa_sec_cipher_init(dev, xform, session);
2375                        else
2376                                ret = dpaa_sec_chain_init(dev, xform, session);
2377                } else {
2378                        DPAA_SEC_ERR("Not supported: Cipher then Auth with decrypt");
2379                        return -ENOTSUP;
2380                }
2381        /* Authenticate then Cipher */
2382        } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2383                   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2384                if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
2385                        session->auth_cipher_text = 0;
2386                        if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2387                                ret = dpaa_sec_cipher_init(dev, xform, session);
2388                        else if (xform->next->cipher.algo
2389                                        == RTE_CRYPTO_CIPHER_NULL)
2390                                ret = dpaa_sec_auth_init(dev, xform, session);
2391                        else
2392                                ret = dpaa_sec_chain_init(dev, xform, session);
2393                } else {
2394                        DPAA_SEC_ERR("Not supported: Auth then Cipher with encrypt");
2395                        return -ENOTSUP;
2396                }
2397
2398        /* AEAD operation for AES-GCM kind of Algorithms */
2399        } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2400                   xform->next == NULL) {
2401                ret = dpaa_sec_aead_init(dev, xform, session);
2402
2403        } else {
2404                DPAA_SEC_ERR("Invalid crypto type");
2405                return -EINVAL;
2406        }
2407        if (ret) {
2408                DPAA_SEC_ERR("unable to init session");
2409                goto err1;
2410        }
2411
2412        rte_spinlock_lock(&internals->lock);
2413        for (i = 0; i < MAX_DPAA_CORES; i++) {
2414                session->inq[i] = dpaa_sec_attach_rxq(internals);
2415                if (session->inq[i] == NULL) {
2416                        DPAA_SEC_ERR("unable to attach sec queue");
2417                        rte_spinlock_unlock(&internals->lock);
2418                        ret = -EBUSY;
2419                        goto err1;
2420                }
2421        }
2422        rte_spinlock_unlock(&internals->lock);
2423
2424        return 0;
2425
2426err1:
2427        free_session_data(session);
2428        return ret;
2429}
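/*
 * Usage sketch (hypothetical application code): a cipher-then-auth chain
 * as accepted by the parser above, AES-CBC encrypt followed by HMAC-SHA1
 * generate. Key and IV fields are omitted for brevity.
 *
 *	struct rte_crypto_sym_xform auth_xf = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.auth = {
 *			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *			.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *		},
 *	};
 *	struct rte_crypto_sym_xform cipher_xf = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = &auth_xf,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *		},
 *	};
 */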
2430
2431static int
2432dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
2433                struct rte_crypto_sym_xform *xform,
2434                struct rte_cryptodev_sym_session *sess,
2435                struct rte_mempool *mempool)
2436{
2437        void *sess_private_data;
2438        int ret;
2439
2440        PMD_INIT_FUNC_TRACE();
2441
2442        if (rte_mempool_get(mempool, &sess_private_data)) {
2443                DPAA_SEC_ERR("Couldn't get object from session mempool");
2444                return -ENOMEM;
2445        }
2446
2447        ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
2448        if (ret != 0) {
2449                DPAA_SEC_ERR("failed to configure session parameters");
2450
2451                /* Return session to mempool */
2452                rte_mempool_put(mempool, sess_private_data);
2453                return ret;
2454        }
2455
2456        set_sym_session_private_data(sess, dev->driver_id,
2457                        sess_private_data);
2458
2460        return 0;
2461}
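/*
 * Usage sketch (hypothetical application code): session objects come
 * from the generic API, which lands in the configure/clear handlers
 * here.
 *
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(sess_mp);
 *
 *	rte_cryptodev_sym_session_init(dev_id, sess, &cipher_xf,
 *			sess_priv_mp);
 */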
2462
2463static inline void
2464free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
2465{
2466        struct dpaa_sec_dev_private *qi = dev->data->dev_private;
2467        struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
2468        uint8_t i;
2469
2470        for (i = 0; i < MAX_DPAA_CORES; i++) {
2471                if (s->inq[i])
2472                        dpaa_sec_detach_rxq(qi, s->inq[i]);
2473                s->inq[i] = NULL;
2474                s->qp[i] = NULL;
2475        }
2476        free_session_data(s);
2477        rte_mempool_put(sess_mp, (void *)s);
2478}
2479
2480/** Clear the memory of session so it doesn't leave key material behind */
2481static void
2482dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
2483                struct rte_cryptodev_sym_session *sess)
2484{
2485        PMD_INIT_FUNC_TRACE();
2486        uint8_t index = dev->driver_id;
2487        void *sess_priv = get_sym_session_private_data(sess, index);
2488        dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2489
2490        if (sess_priv) {
2491                free_session_memory(dev, s);
2492                set_sym_session_private_data(sess, index, NULL);
2493        }
2494}
2495
2496#ifdef RTE_LIB_SECURITY
2497static int
2498dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2499                        struct rte_security_ipsec_xform *ipsec_xform,
2500                        dpaa_sec_session *session)
2501{
2502        PMD_INIT_FUNC_TRACE();
2503
2504        session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2505                                               RTE_CACHE_LINE_SIZE);
2506        if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2507                DPAA_SEC_ERR("No Memory for aead key");
2508                return -ENOMEM;
2509        }
2510        memcpy(session->aead_key.data, aead_xform->key.data,
2511               aead_xform->key.length);
2512
2513        session->digest_length = aead_xform->digest_length;
2514        session->aead_key.length = aead_xform->key.length;
2515
2516        switch (aead_xform->algo) {
2517        case RTE_CRYPTO_AEAD_AES_GCM:
2518                switch (session->digest_length) {
2519                case 8:
2520                        session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8;
2521                        break;
2522                case 12:
2523                        session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12;
2524                        break;
2525                case 16:
2526                        session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16;
2527                        break;
2528                default:
2529                        DPAA_SEC_ERR("Crypto: Undefined GCM digest %d",
2530                                     session->digest_length);
2531                        return -EINVAL;
2532                }
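                /*
                 * For AES-GCM ESP (RFC 4106) the 4-byte salt from the SA
                 * is the fixed part of the nonce; it is kept in the
                 * encap/decap PDB below rather than travelling with each
                 * packet.
                 */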
2533                if (session->dir == DIR_ENC) {
2534                        memcpy(session->encap_pdb.gcm.salt,
2535                                (uint8_t *)&(ipsec_xform->salt), 4);
2536                } else {
2537                        memcpy(session->decap_pdb.gcm.salt,
2538                                (uint8_t *)&(ipsec_xform->salt), 4);
2539                }
2540                session->aead_key.algmode = OP_ALG_AAI_GCM;
2541                session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2542                break;
2543        default:
2544                DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u",
2545                              aead_xform->algo);
2546                return -ENOTSUP;
2547        }
2548        return 0;
2549}
2550
2551static int
2552dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2553        struct rte_crypto_auth_xform *auth_xform,
2554        struct rte_security_ipsec_xform *ipsec_xform,
2555        dpaa_sec_session *session)
2556{
2557        if (cipher_xform) {
2558                session->cipher_key.data = rte_zmalloc(NULL,
2559                                                       cipher_xform->key.length,
2560                                                       RTE_CACHE_LINE_SIZE);
2561                if (session->cipher_key.data == NULL &&
2562                                cipher_xform->key.length > 0) {
2563                        DPAA_SEC_ERR("No Memory for cipher key");
2564                        return -ENOMEM;
2565                }
2566
2567                session->cipher_key.length = cipher_xform->key.length;
2568                memcpy(session->cipher_key.data, cipher_xform->key.data,
2569                                cipher_xform->key.length);
2570                session->cipher_alg = cipher_xform->algo;
2571        } else {
2572                session->cipher_key.data = NULL;
2573                session->cipher_key.length = 0;
2574                session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2575        }
2576
2577        if (auth_xform) {
2578                session->auth_key.data = rte_zmalloc(NULL,
2579                                                auth_xform->key.length,
2580                                                RTE_CACHE_LINE_SIZE);
2581                if (session->auth_key.data == NULL &&
2582                                auth_xform->key.length > 0) {
2583                        DPAA_SEC_ERR("No Memory for auth key");
2584                        return -ENOMEM;
2585                }
2586                session->auth_key.length = auth_xform->key.length;
2587                memcpy(session->auth_key.data, auth_xform->key.data,
2588                                auth_xform->key.length);
2589                session->auth_alg = auth_xform->algo;
2590                session->digest_length = auth_xform->digest_length;
2591        } else {
2592                session->auth_key.data = NULL;
2593                session->auth_key.length = 0;
2594                session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2595        }
2596
2597        switch (session->auth_alg) {
2598        case RTE_CRYPTO_AUTH_SHA1_HMAC:
2599                session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
2600                session->auth_key.algmode = OP_ALG_AAI_HMAC;
2601                break;
2602        case RTE_CRYPTO_AUTH_MD5_HMAC:
2603                session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
2604                session->auth_key.algmode = OP_ALG_AAI_HMAC;
2605                break;
2606        case RTE_CRYPTO_AUTH_SHA256_HMAC:
2607                session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2608                session->auth_key.algmode = OP_ALG_AAI_HMAC;
2609                if (session->digest_length != 16)
2610                        DPAA_SEC_WARN(
2611                        "Using a non-standard sha256-hmac truncated length;"
2612                        " it will not work with lookaside proto");
2613                break;
2614        case RTE_CRYPTO_AUTH_SHA384_HMAC:
2615                session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2616                session->auth_key.algmode = OP_ALG_AAI_HMAC;
2617                break;
2618        case RTE_CRYPTO_AUTH_SHA512_HMAC:
2619                session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2620                session->auth_key.algmode = OP_ALG_AAI_HMAC;
2621                break;
2622        case RTE_CRYPTO_AUTH_AES_CMAC:
2623                session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96;
2624                break;
2625        case RTE_CRYPTO_AUTH_NULL:
2626                session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
2627                break;
2628        case RTE_CRYPTO_AUTH_SHA224_HMAC:
2629        case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2630        case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2631        case RTE_CRYPTO_AUTH_SHA1:
2632        case RTE_CRYPTO_AUTH_SHA256:
2633        case RTE_CRYPTO_AUTH_SHA512:
2634        case RTE_CRYPTO_AUTH_SHA224:
2635        case RTE_CRYPTO_AUTH_SHA384:
2636        case RTE_CRYPTO_AUTH_MD5:
2637        case RTE_CRYPTO_AUTH_AES_GMAC:
2638        case RTE_CRYPTO_AUTH_KASUMI_F9:
2639        case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2640        case RTE_CRYPTO_AUTH_ZUC_EIA3:
2641                DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2642                              session->auth_alg);
2643                return -ENOTSUP;
2644        default:
2645                DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
2646                              session->auth_alg);
2647                return -ENOTSUP;
2648        }
2649
2650        switch (session->cipher_alg) {
2651        case RTE_CRYPTO_CIPHER_AES_CBC:
2652                session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
2653                session->cipher_key.algmode = OP_ALG_AAI_CBC;
2654                break;
2655        case RTE_CRYPTO_CIPHER_3DES_CBC:
2656                session->cipher_key.alg = OP_PCL_IPSEC_3DES;
2657                session->cipher_key.algmode = OP_ALG_AAI_CBC;
2658                break;
2659        case RTE_CRYPTO_CIPHER_AES_CTR:
2660                session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
2661                session->cipher_key.algmode = OP_ALG_AAI_CTR;
2662                if (session->dir == DIR_ENC) {
2663                        session->encap_pdb.ctr.ctr_initial = 0x00000001;
2664                        session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2665                } else {
2666                        session->decap_pdb.ctr.ctr_initial = 0x00000001;
2667                        session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2668                }
2669                break;
2670        case RTE_CRYPTO_CIPHER_NULL:
2671                session->cipher_key.alg = OP_PCL_IPSEC_NULL;
2672                break;
2673        case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2674        case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2675        case RTE_CRYPTO_CIPHER_3DES_ECB:
2676        case RTE_CRYPTO_CIPHER_AES_ECB:
2677        case RTE_CRYPTO_CIPHER_KASUMI_F8:
2678                DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2679                              session->cipher_alg);
2680                return -ENOTSUP;
2681        default:
2682                DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2683                              session->cipher_alg);
2684                return -ENOTSUP;
2685        }
2686
2687        return 0;
2688}
2689
2690static int
2691dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
2692                           struct rte_security_session_conf *conf,
2693                           void *sess)
2694{
2695        struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2696        struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2697        struct rte_crypto_auth_xform *auth_xform = NULL;
2698        struct rte_crypto_cipher_xform *cipher_xform = NULL;
2699        struct rte_crypto_aead_xform *aead_xform = NULL;
2700        dpaa_sec_session *session = (dpaa_sec_session *)sess;
2701        uint32_t i;
2702        int ret;
2703
2704        PMD_INIT_FUNC_TRACE();
2705
2706        memset(session, 0, sizeof(dpaa_sec_session));
2707        session->proto_alg = conf->protocol;
2708        session->ctxt = DPAA_SEC_IPSEC;
2709
2710        if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
2711                session->dir = DIR_ENC;
2712        else
2713                session->dir = DIR_DEC;
2714
2715        if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2716                cipher_xform = &conf->crypto_xform->cipher;
2717                if (conf->crypto_xform->next)
2718                        auth_xform = &conf->crypto_xform->next->auth;
2719                ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2720                                        ipsec_xform, session);
2721        } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2722                auth_xform = &conf->crypto_xform->auth;
2723                if (conf->crypto_xform->next)
2724                        cipher_xform = &conf->crypto_xform->next->cipher;
2725                ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2726                                        ipsec_xform, session);
2727        } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2728                aead_xform = &conf->crypto_xform->aead;
2729                ret = dpaa_sec_ipsec_aead_init(aead_xform,
2730                                        ipsec_xform, session);
2731        } else {
2732                DPAA_SEC_ERR("XFORM not specified");
2733                ret = -EINVAL;
2734                goto out;
2735        }
2736        if (ret) {
2737                DPAA_SEC_ERR("Failed to process xform");
2738                goto out;
2739        }
2740
2741        if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2742                if (ipsec_xform->tunnel.type ==
2743                                RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2744                        session->ip4_hdr.ip_v = IPVERSION;
2745                        session->ip4_hdr.ip_hl = 5;
2746                        session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2747                                                sizeof(session->ip4_hdr));
2748                        session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2749                        session->ip4_hdr.ip_id = 0;
2750                        session->ip4_hdr.ip_off = 0;
2751                        session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2752                        session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2753                                        RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2754                                        IPPROTO_ESP : IPPROTO_AH;
2755                        session->ip4_hdr.ip_sum = 0;
2756                        session->ip4_hdr.ip_src =
2757                                        ipsec_xform->tunnel.ipv4.src_ip;
2758                        session->ip4_hdr.ip_dst =
2759                                        ipsec_xform->tunnel.ipv4.dst_ip;
2760                        session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2761                                                (void *)&session->ip4_hdr,
2762                                                sizeof(struct ip));
2763                        session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2764                } else if (ipsec_xform->tunnel.type ==
2765                                RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2766                        session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2767                                DPAA_IPv6_DEFAULT_VTC_FLOW |
2768                                ((ipsec_xform->tunnel.ipv6.dscp <<
2769                                        RTE_IPV6_HDR_TC_SHIFT) &
2770                                        RTE_IPV6_HDR_TC_MASK) |
2771                                ((ipsec_xform->tunnel.ipv6.flabel <<
2772                                        RTE_IPV6_HDR_FL_SHIFT) &
2773                                        RTE_IPV6_HDR_FL_MASK));
2774                        /* Payload length will be updated by HW */
2775                        session->ip6_hdr.payload_len = 0;
2776                        session->ip6_hdr.hop_limits =
2777                                        ipsec_xform->tunnel.ipv6.hlimit;
2778                        session->ip6_hdr.proto = (ipsec_xform->proto ==
2779                                        RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2780                                        IPPROTO_ESP : IPPROTO_AH;
2781                        memcpy(&session->ip6_hdr.src_addr,
2782                                        &ipsec_xform->tunnel.ipv6.src_addr, 16);
2783                        memcpy(&session->ip6_hdr.dst_addr,
2784                                        &ipsec_xform->tunnel.ipv6.dst_addr, 16);
2785                        session->encap_pdb.ip_hdr_len =
2786                                                sizeof(struct rte_ipv6_hdr);
2787                }
2788                session->encap_pdb.options =
2789                        (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2790                        PDBOPTS_ESP_OIHI_PDB_INL |
2791                        PDBOPTS_ESP_IVSRC |
2792                        PDBHMO_ESP_ENCAP_DTTL |
2793                        PDBHMO_ESP_SNR;
2794                if (ipsec_xform->options.esn)
2795                        session->encap_pdb.options |= PDBOPTS_ESP_ESN;
2796                session->encap_pdb.spi = ipsec_xform->spi;
2797
2798        } else if (ipsec_xform->direction ==
2799                        RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2800                if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
2801                        session->decap_pdb.options = sizeof(struct ip) << 16;
2802                else
2803                        session->decap_pdb.options =
2804                                        sizeof(struct rte_ipv6_hdr) << 16;
2805                if (ipsec_xform->options.esn)
2806                        session->decap_pdb.options |= PDBOPTS_ESP_ESN;
2807                if (ipsec_xform->replay_win_sz) {
2808                        uint32_t win_sz;
2809                        win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
2810
2811                        switch (win_sz) {
2812                        case 1:
2813                        case 2:
2814                        case 4:
2815                        case 8:
2816                        case 16:
2817                        case 32:
2818                                session->decap_pdb.options |= PDBOPTS_ESP_ARS32;
2819                                break;
2820                        case 64:
2821                                session->decap_pdb.options |= PDBOPTS_ESP_ARS64;
2822                                break;
2823                        default:
2824                                session->decap_pdb.options |=
2825                                                        PDBOPTS_ESP_ARS128;
2826                        }
2827                }
        } else {
                DPAA_SEC_ERR("Invalid IPsec direction specified");
                ret = -EINVAL;
                goto out;
        }
2830        rte_spinlock_lock(&internals->lock);
2831        for (i = 0; i < MAX_DPAA_CORES; i++) {
2832                session->inq[i] = dpaa_sec_attach_rxq(internals);
2833                if (session->inq[i] == NULL) {
2834                        DPAA_SEC_ERR("unable to attach sec queue");
2835                        rte_spinlock_unlock(&internals->lock);
                        ret = -EBUSY;
                        goto out;
2837                }
2838        }
2839        rte_spinlock_unlock(&internals->lock);
2840
2841        return 0;
2842out:
2843        free_session_data(session);
        return ret;
2845}
2846
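/*
 * Populate a dpaa_sec_session for the PDCP security protocol: pick the
 * PDCP cipher/auth algorithm types from the crypto xform chain, copy the
 * keys, record the PDCP xform fields (domain, bearer, SN size, HFN, ...)
 * and attach one SEC Rx queue per core. On failure, the key buffers are
 * freed and the session is cleared.
 */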
2847static int
2848dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
2849                          struct rte_security_session_conf *conf,
2850                          void *sess)
2851{
2852        struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
2853        struct rte_crypto_sym_xform *xform = conf->crypto_xform;
2854        struct rte_crypto_auth_xform *auth_xform = NULL;
2855        struct rte_crypto_cipher_xform *cipher_xform = NULL;
2856        dpaa_sec_session *session = (dpaa_sec_session *)sess;
2857        struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
2858        uint32_t i;
2859        int ret;
2860
2861        PMD_INIT_FUNC_TRACE();
2862
2863        memset(session, 0, sizeof(dpaa_sec_session));
2864
        /* find xform types */
2866        if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2867                cipher_xform = &xform->cipher;
2868                if (xform->next != NULL)
2869                        auth_xform = &xform->next->auth;
2870        } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2871                auth_xform = &xform->auth;
2872                if (xform->next != NULL)
2873                        cipher_xform = &xform->next->cipher;
2874        } else {
2875                DPAA_SEC_ERR("Invalid crypto type");
2876                return -EINVAL;
2877        }
2878
2879        session->proto_alg = conf->protocol;
2880        session->ctxt = DPAA_SEC_PDCP;
2881
2882        if (cipher_xform) {
2883                switch (cipher_xform->algo) {
2884                case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2885                        session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW;
2886                        break;
2887                case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2888                        session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC;
2889                        break;
2890                case RTE_CRYPTO_CIPHER_AES_CTR:
2891                        session->cipher_key.alg = PDCP_CIPHER_TYPE_AES;
2892                        break;
2893                case RTE_CRYPTO_CIPHER_NULL:
2894                        session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL;
2895                        break;
2896                default:
                        DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
                                      cipher_xform->algo);
2899                        return -EINVAL;
2900                }
2901
2902                session->cipher_key.data = rte_zmalloc(NULL,
2903                                               cipher_xform->key.length,
2904                                               RTE_CACHE_LINE_SIZE);
2905                if (session->cipher_key.data == NULL &&
2906                                cipher_xform->key.length > 0) {
2907                        DPAA_SEC_ERR("No Memory for cipher key");
2908                        return -ENOMEM;
2909                }
2910                session->cipher_key.length = cipher_xform->key.length;
2911                memcpy(session->cipher_key.data, cipher_xform->key.data,
2912                        cipher_xform->key.length);
2913                session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2914                                        DIR_ENC : DIR_DEC;
2915                session->cipher_alg = cipher_xform->algo;
2916        } else {
2917                session->cipher_key.data = NULL;
2918                session->cipher_key.length = 0;
2919                session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2920                session->dir = DIR_ENC;
2921        }
2922
2923        if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
2924                if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
2925                    pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
2926                        DPAA_SEC_ERR(
2927                                "PDCP Seq Num size should be 5/12 bits for cmode");
2928                        ret = -EINVAL;
2929                        goto out;
2930                }
2931        }
2932
2933        if (auth_xform) {
2934                switch (auth_xform->algo) {
2935                case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2936                        session->auth_key.alg = PDCP_AUTH_TYPE_SNOW;
2937                        break;
2938                case RTE_CRYPTO_AUTH_ZUC_EIA3:
2939                        session->auth_key.alg = PDCP_AUTH_TYPE_ZUC;
2940                        break;
2941                case RTE_CRYPTO_AUTH_AES_CMAC:
2942                        session->auth_key.alg = PDCP_AUTH_TYPE_AES;
2943                        break;
2944                case RTE_CRYPTO_AUTH_NULL:
2945                        session->auth_key.alg = PDCP_AUTH_TYPE_NULL;
2946                        break;
2947                default:
                        DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
                                      auth_xform->algo);
2950                        rte_free(session->cipher_key.data);
2951                        return -EINVAL;
2952                }
2953                session->auth_key.data = rte_zmalloc(NULL,
2954                                                     auth_xform->key.length,
2955                                                     RTE_CACHE_LINE_SIZE);
2956                if (!session->auth_key.data &&
2957                    auth_xform->key.length > 0) {
2958                        DPAA_SEC_ERR("No Memory for auth key");
2959                        rte_free(session->cipher_key.data);
2960                        return -ENOMEM;
2961                }
2962                session->auth_key.length = auth_xform->key.length;
2963                memcpy(session->auth_key.data, auth_xform->key.data,
2964                       auth_xform->key.length);
2965                session->auth_alg = auth_xform->algo;
2966        } else {
2967                session->auth_key.data = NULL;
2968                session->auth_key.length = 0;
2969                session->auth_alg = 0;
2970        }
2971        session->pdcp.domain = pdcp_xform->domain;
2972        session->pdcp.bearer = pdcp_xform->bearer;
2973        session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
2974        session->pdcp.sn_size = pdcp_xform->sn_size;
2975        session->pdcp.hfn = pdcp_xform->hfn;
2976        session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
2977        session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
2978        session->pdcp.sdap_enabled = pdcp_xform->sdap_enabled;
2979        if (cipher_xform)
2980                session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
2981
2982        rte_spinlock_lock(&dev_priv->lock);
2983        for (i = 0; i < MAX_DPAA_CORES; i++) {
2984                session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
2985                if (session->inq[i] == NULL) {
2986                        DPAA_SEC_ERR("unable to attach sec queue");
2987                        rte_spinlock_unlock(&dev_priv->lock);
2988                        ret = -EBUSY;
2989                        goto out;
2990                }
2991        }
2992        rte_spinlock_unlock(&dev_priv->lock);
2993        return 0;
2994out:
2995        rte_free(session->auth_key.data);
2996        rte_free(session->cipher_key.data);
2997        memset(session, 0, sizeof(dpaa_sec_session));
2998        return ret;
2999}
3000
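/*
 * rte_security session_create callback: take one object from the session
 * mempool for the private session data, dispatch to the IPsec or PDCP
 * setup routine based on conf->protocol, and return the object to the
 * mempool if session setup fails.
 */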
3001static int
3002dpaa_sec_security_session_create(void *dev,
3003                                 struct rte_security_session_conf *conf,
3004                                 struct rte_security_session *sess,
3005                                 struct rte_mempool *mempool)
3006{
3007        void *sess_private_data;
3008        struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3009        int ret;
3010
3011        if (rte_mempool_get(mempool, &sess_private_data)) {
3012                DPAA_SEC_ERR("Couldn't get object from session mempool");
3013                return -ENOMEM;
3014        }
3015
3016        switch (conf->protocol) {
3017        case RTE_SECURITY_PROTOCOL_IPSEC:
3018                ret = dpaa_sec_set_ipsec_session(cdev, conf,
3019                                sess_private_data);
3020                break;
3021        case RTE_SECURITY_PROTOCOL_PDCP:
3022                ret = dpaa_sec_set_pdcp_session(cdev, conf,
3023                                sess_private_data);
3024                break;
        case RTE_SECURITY_PROTOCOL_MACSEC:
                /* Not supported; return the object to the mempool */
                rte_mempool_put(mempool, sess_private_data);
                return -ENOTSUP;
        default:
                rte_mempool_put(mempool, sess_private_data);
                return -EINVAL;
3029        }
3030        if (ret != 0) {
3031                DPAA_SEC_ERR("failed to configure session parameters");
3032                /* Return session to mempool */
3033                rte_mempool_put(mempool, sess_private_data);
3034                return ret;
3035        }
3036
3037        set_sec_session_private_data(sess, sess_private_data);
3038
3039        return ret;
3040}
3041
/** Clear the session memory so that no key material is left behind */
3043static int
dpaa_sec_security_session_destroy(void *dev,
3045                struct rte_security_session *sess)
3046{
        void *sess_priv = get_sec_session_private_data(sess);
        dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

        PMD_INIT_FUNC_TRACE();
3050
3051        if (sess_priv) {
3052                free_session_memory((struct rte_cryptodev *)dev, s);
3053                set_sec_session_private_data(sess, NULL);
3054        }
3055        return 0;
3056}
3057#endif
3058static int
3059dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3060                       struct rte_cryptodev_config *config __rte_unused)
3061{
3062        PMD_INIT_FUNC_TRACE();
3063
3064        return 0;
3065}
3066
3067static int
3068dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
3069{
3070        PMD_INIT_FUNC_TRACE();
3071        return 0;
3072}
3073
3074static void
3075dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
3076{
3077        PMD_INIT_FUNC_TRACE();
3078}
3079
3080static int
3081dpaa_sec_dev_close(struct rte_cryptodev *dev)
3082{
3083        PMD_INIT_FUNC_TRACE();
3084
3085        if (dev == NULL)
                return -ENODEV;
3087
3088        return 0;
3089}
3090
3091static void
3092dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
3093                       struct rte_cryptodev_info *info)
3094{
3095        struct dpaa_sec_dev_private *internals = dev->data->dev_private;
3096
3097        PMD_INIT_FUNC_TRACE();
3098        if (info != NULL) {
3099                info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3100                info->feature_flags = dev->feature_flags;
3101                info->capabilities = dpaa_sec_capabilities;
3102                info->sym.max_nb_sessions = internals->max_nb_sessions;
3103                info->driver_id = cryptodev_driver_id;
3104        }
3105}
3106
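/*
 * DQRR callback used when a SEC Tx (output) queue is attached to an event
 * queue with parallel scheduling: recover the op context from the frame
 * descriptor, translate the SEC status into the crypto op status, fill in
 * the rte_event from the queue's stored event template, and release the
 * context back to its pool. The DQRR entry is consumed immediately.
 */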
3107static enum qman_cb_dqrr_result
3108dpaa_sec_process_parallel_event(void *event,
3109                        struct qman_portal *qm __always_unused,
3110                        struct qman_fq *outq,
3111                        const struct qm_dqrr_entry *dqrr,
3112                        void **bufs)
3113{
3114        const struct qm_fd *fd;
3115        struct dpaa_sec_job *job;
3116        struct dpaa_sec_op_ctx *ctx;
3117        struct rte_event *ev = (struct rte_event *)event;
3118
3119        fd = &dqrr->fd;
3120
        /* The SG table is embedded in the op context:
         * sg[0] describes the output buffer,
         * sg[1] describes the input buffer.
         */
3125        job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
3126
3127        ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3128        ctx->fd_status = fd->status;
3129        if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3130                struct qm_sg_entry *sg_out;
3131                uint32_t len;
3132
3133                sg_out = &job->sg[0];
3134                hw_sg_to_cpu(sg_out);
3135                len = sg_out->length;
3136                ctx->op->sym->m_src->pkt_len = len;
3137                ctx->op->sym->m_src->data_len = len;
3138        }
3139        if (!ctx->fd_status) {
3140                ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3141        } else {
3142                DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3143                ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3144        }
3145        ev->event_ptr = (void *)ctx->op;
3146
3147        ev->flow_id = outq->ev.flow_id;
3148        ev->sub_event_type = outq->ev.sub_event_type;
3149        ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3150        ev->op = RTE_EVENT_OP_NEW;
3151        ev->sched_type = outq->ev.sched_type;
3152        ev->queue_id = outq->ev.queue_id;
3153        ev->priority = outq->ev.priority;
3154        *bufs = (void *)ctx->op;
3155
3156        rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3157
3158        return qman_cb_dqrr_consume;
3159}
3160
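/*
 * DQRR callback for atomic scheduling. Processing mirrors the parallel
 * handler, but the DQRR entry is deferred rather than consumed: its ring
 * index is recorded in the per-lcore DQRR-held bitmap and stashed in the
 * mbuf sequence number, so the entry can be consumed later and per-flow
 * atomic ordering is preserved.
 */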
3161static enum qman_cb_dqrr_result
3162dpaa_sec_process_atomic_event(void *event,
3163                        struct qman_portal *qm __rte_unused,
3164                        struct qman_fq *outq,
3165                        const struct qm_dqrr_entry *dqrr,
3166                        void **bufs)
3167{
3168        u8 index;
3169        const struct qm_fd *fd;
3170        struct dpaa_sec_job *job;
3171        struct dpaa_sec_op_ctx *ctx;
3172        struct rte_event *ev = (struct rte_event *)event;
3173
3174        fd = &dqrr->fd;
3175
        /* The SG table is embedded in an op context:
         * sg[0] describes the output buffer,
         * sg[1] describes the input buffer.
         */
3180        job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
3181
3182        ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3183        ctx->fd_status = fd->status;
3184        if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3185                struct qm_sg_entry *sg_out;
3186                uint32_t len;
3187
3188                sg_out = &job->sg[0];
3189                hw_sg_to_cpu(sg_out);
3190                len = sg_out->length;
3191                ctx->op->sym->m_src->pkt_len = len;
3192                ctx->op->sym->m_src->data_len = len;
3193        }
3194        if (!ctx->fd_status) {
3195                ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3196        } else {
3197                DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3198                ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3199        }
3200        ev->event_ptr = (void *)ctx->op;
3201        ev->flow_id = outq->ev.flow_id;
3202        ev->sub_event_type = outq->ev.sub_event_type;
3203        ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3204        ev->op = RTE_EVENT_OP_NEW;
3205        ev->sched_type = outq->ev.sched_type;
3206        ev->queue_id = outq->ev.queue_id;
3207        ev->priority = outq->ev.priority;
3208
3209        /* Save active dqrr entries */
3210        index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
3211        DPAA_PER_LCORE_DQRR_SIZE++;
3212        DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
3213        DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
3214        ev->impl_opaque = index + 1;
3215        *dpaa_seqn(ctx->op->sym->m_src) = (uint32_t)index + 1;
3216        *bufs = (void *)ctx->op;
3217
3218        rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3219
3220        return qman_cb_dqrr_defer;
3221}
3222
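/*
 * Attach the Tx (output) FQ of queue pair qp_id to an event channel:
 * program the FQ destination to ch_id and select the dequeue callback
 * according to the requested scheduling type (atomic uses HOLDACTIVE;
 * ordered is not supported).
 *
 * Illustrative sketch with hypothetical values; this is normally invoked
 * through the eventdev crypto adapter rather than called directly:
 *
 *   struct rte_event ev = {
 *           .sched_type = RTE_SCHED_TYPE_ATOMIC,
 *           .queue_id = 0,
 *   };
 *   ret = dpaa_sec_eventq_attach(cryptodev, 0, ch_id, &ev);
 */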
3223int
3224dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
3225                int qp_id,
3226                uint16_t ch_id,
3227                const struct rte_event *event)
3228{
3229        struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3230        struct qm_mcc_initfq opts = {0};
3231
3232        int ret;
3233
3234        opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3235                       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3236        opts.fqd.dest.channel = ch_id;
3237
3238        switch (event->sched_type) {
3239        case RTE_SCHED_TYPE_ATOMIC:
3240                opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
                /* Clear the FQCTRL_AVOIDBLOCK bit, as it is unnecessary
                 * when HOLD_ACTIVE is set
                 */
3244                opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
3245                qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
3246                break;
3247        case RTE_SCHED_TYPE_ORDERED:
                DPAA_SEC_ERR("Ordered queue schedule type is not supported");
3249                return -ENOTSUP;
3250        default:
3251                opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
3252                qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
3253                break;
3254        }
3255
3256        ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
3257        if (unlikely(ret)) {
3258                DPAA_SEC_ERR("unable to init caam source fq!");
3259                return ret;
3260        }
3261
3262        memcpy(&qp->outq.ev, event, sizeof(struct rte_event));
3263
3264        return 0;
3265}
3266
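/*
 * Detach the queue pair's Tx FQ from the event channel: restore the
 * default DQRR/ERN callbacks, retire the FQ, take it out of service and
 * re-initialize it unscheduled so it can be used in poll mode again.
 */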
3267int
3268dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
3269                        int qp_id)
3270{
3271        struct qm_mcc_initfq opts = {0};
3272        int ret;
3273        struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3274
3275        opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3276                       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3277        qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
3278        qp->outq.cb.ern  = ern_sec_fq_handler;
3279        qman_retire_fq(&qp->outq, NULL);
3280        qman_oos_fq(&qp->outq);
3281        ret = qman_init_fq(&qp->outq, 0, &opts);
3282        if (ret)
                DPAA_SEC_ERR("Error in qman_init_fq: ret: %d", ret);
3284        qp->outq.cb.dqrr = NULL;
3285
3286        return ret;
3287}
3288
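/* Cryptodev ops table, wired up in dpaa_sec_dev_init() */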
3289static struct rte_cryptodev_ops crypto_ops = {
3290        .dev_configure        = dpaa_sec_dev_configure,
3291        .dev_start            = dpaa_sec_dev_start,
3292        .dev_stop             = dpaa_sec_dev_stop,
3293        .dev_close            = dpaa_sec_dev_close,
3294        .dev_infos_get        = dpaa_sec_dev_infos_get,
3295        .queue_pair_setup     = dpaa_sec_queue_pair_setup,
3296        .queue_pair_release   = dpaa_sec_queue_pair_release,
3297        .sym_session_get_size     = dpaa_sec_sym_session_get_size,
3298        .sym_session_configure    = dpaa_sec_sym_session_configure,
3299        .sym_session_clear        = dpaa_sec_sym_session_clear
3300};
3301
3302#ifdef RTE_LIB_SECURITY
3303static const struct rte_security_capability *
3304dpaa_sec_capabilities_get(void *device __rte_unused)
3305{
3306        return dpaa_sec_security_cap;
3307}
3308
3309static const struct rte_security_ops dpaa_sec_security_ops = {
3310        .session_create = dpaa_sec_security_session_create,
3311        .session_update = NULL,
3312        .session_stats_get = NULL,
3313        .session_destroy = dpaa_sec_security_session_destroy,
3314        .set_pkt_metadata = NULL,
3315        .capabilities_get = dpaa_sec_capabilities_get
3316};
3317#endif
3318static int
3319dpaa_sec_uninit(struct rte_cryptodev *dev)
3320{
3321        struct dpaa_sec_dev_private *internals;
3322
3323        if (dev == NULL)
3324                return -ENODEV;
3325
3326        internals = dev->data->dev_private;
3327        rte_free(dev->security_ctx);
3328
3329        rte_free(internals);
3330
3331        DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
3332                      dev->data->name, rte_socket_id());
3333
3334        return 0;
3335}
3336
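/*
 * Per-device initialization: hook up the ops and burst functions, publish
 * the feature flags, and (in the primary process only) create the security
 * context, the Tx FQ of every queue pair and the pool of dynamically
 * allocated Rx FQs that sessions attach to later.
 */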
3337static int
3338dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
3339{
3340        struct dpaa_sec_dev_private *internals;
3341#ifdef RTE_LIB_SECURITY
3342        struct rte_security_ctx *security_instance;
3343#endif
3344        struct dpaa_sec_qp *qp;
3345        uint32_t i, flags;
3346        int ret;
3347
3348        PMD_INIT_FUNC_TRACE();
3349
3350        cryptodev->driver_id = cryptodev_driver_id;
3351        cryptodev->dev_ops = &crypto_ops;
3352
3353        cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
3354        cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
3355        cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3356                        RTE_CRYPTODEV_FF_HW_ACCELERATED |
3357                        RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3358                        RTE_CRYPTODEV_FF_SECURITY |
3359                        RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3360                        RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3361                        RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3362                        RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3363                        RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3364
3365        internals = cryptodev->data->dev_private;
3366        internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
3367        internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
3368
        /*
         * For secondary processes, we do not initialise any further, as the
         * primary process has already done this work.
         */
3374        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                DPAA_SEC_WARN("Device already initialized by primary process");
3376                return 0;
3377        }
3378#ifdef RTE_LIB_SECURITY
        /* Initialize security_ctx only for the primary process */
3380        security_instance = rte_malloc("rte_security_instances_ops",
3381                                sizeof(struct rte_security_ctx), 0);
3382        if (security_instance == NULL)
3383                return -ENOMEM;
3384        security_instance->device = (void *)cryptodev;
3385        security_instance->ops = &dpaa_sec_security_ops;
3386        security_instance->sess_cnt = 0;
3387        cryptodev->security_ctx = security_instance;
3388#endif
3389        rte_spinlock_init(&internals->lock);
3390        for (i = 0; i < internals->max_nb_queue_pairs; i++) {
3391                /* init qman fq for queue pair */
3392                qp = &internals->qps[i];
3393                ret = dpaa_sec_init_tx(&qp->outq);
3394                if (ret) {
                        DPAA_SEC_ERR("failed to config tx of queue pair %d", i);
3396                        goto init_error;
3397                }
3398        }
3399
3400        flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
3401                QMAN_FQ_FLAG_TO_DCPORTAL;
3402        for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
                /* create rx qman fq for sessions */
3404                ret = qman_create_fq(0, flags, &internals->inq[i]);
3405                if (unlikely(ret != 0)) {
3406                        DPAA_SEC_ERR("sec qman_create_fq failed");
3407                        goto init_error;
3408                }
3409        }
3410
3411        RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
3412        return 0;
3413
3414init_error:
        DPAA_SEC_ERR("driver %s: create failed", cryptodev->data->name);
3416
3417        rte_free(cryptodev->security_ctx);
3418        return -EFAULT;
3419}
3420
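/*
 * Bus probe: allocate the cryptodev, read the SEC era from the device tree
 * if it has not been configured yet, make sure the calling thread has a
 * QMan portal, and run the device initialization. On failure, everything
 * allocated here is released again.
 */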
3421static int
3422cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
3423                                struct rte_dpaa_device *dpaa_dev)
3424{
3425        struct rte_cryptodev *cryptodev;
3426        char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3427
3428        int retval;
3429
3430        snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);
3431
3432        cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3433        if (cryptodev == NULL)
3434                return -ENOMEM;
3435
3436        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3437                cryptodev->data->dev_private = rte_zmalloc_socket(
3438                                        "cryptodev private structure",
3439                                        sizeof(struct dpaa_sec_dev_private),
3440                                        RTE_CACHE_LINE_SIZE,
3441                                        rte_socket_id());
3442
3443                if (cryptodev->data->dev_private == NULL)
                        rte_panic("Cannot allocate memory for private "
                                        "device data");
3446        }
3447
3448        dpaa_dev->crypto_dev = cryptodev;
3449        cryptodev->device = &dpaa_dev->device;
3450
3451        /* init user callbacks */
3452        TAILQ_INIT(&(cryptodev->link_intr_cbs));
3453
        /* read the SEC era from the device tree if it is not configured */
3455        if (!rta_get_sec_era()) {
3456                const struct device_node *caam_node;
3457
3458                for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
3459                        const uint32_t *prop = of_get_property(caam_node,
3460                                        "fsl,sec-era",
3461                                        NULL);
3462                        if (prop) {
3463                                rta_set_sec_era(
3464                                        INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
3465                                break;
3466                        }
3467                }
3468        }
3469
3470        if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
3471                retval = rte_dpaa_portal_init((void *)1);
3472                if (retval) {
3473                        DPAA_SEC_ERR("Unable to initialize portal");
3474                        goto out;
3475                }
3476        }
3477
3478        /* Invoke PMD device initialization function */
3479        retval = dpaa_sec_dev_init(cryptodev);
3480        if (retval == 0)
3481                return 0;
3482
3483        retval = -ENXIO;
3484out:
3485        /* In case of error, cleanup is done */
3486        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3487                rte_free(cryptodev->data->dev_private);
3488
3489        rte_cryptodev_pmd_release_device(cryptodev);
3490
3491        return retval;
3492}
3493
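/*
 * Bus remove: undo dpaa_sec_dev_init() and release the cryptodev.
 */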
3494static int
3495cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
3496{
3497        struct rte_cryptodev *cryptodev;
3498        int ret;
3499
3500        cryptodev = dpaa_dev->crypto_dev;
3501        if (cryptodev == NULL)
3502                return -ENODEV;
3503
3504        ret = dpaa_sec_uninit(cryptodev);
3505        if (ret)
3506                return ret;
3507
3508        return rte_cryptodev_pmd_destroy(cryptodev);
3509}
3510
3511static struct rte_dpaa_driver rte_dpaa_sec_driver = {
3512        .drv_type = FSL_DPAA_CRYPTO,
3513        .driver = {
3514                .name = "DPAA SEC PMD"
3515        },
3516        .probe = cryptodev_dpaa_sec_probe,
3517        .remove = cryptodev_dpaa_sec_remove,
3518};
3519
3520static struct cryptodev_driver dpaa_sec_crypto_drv;
3521
3522RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
3523RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
3524                cryptodev_driver_id);
3525RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);
3526