dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
#include <rte_byteorder.h>

#include "aesni_gcm_pmd_private.h"

static uint8_t cryptodev_driver_id;

/* setup session handlers */
static void
set_func_ops(struct aesni_gcm_session *s, const struct aesni_gcm_ops *gcm_ops)
{
        s->ops.pre = gcm_ops->pre;
        s->ops.init = gcm_ops->init;

        switch (s->op) {
        case AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION:
                s->ops.cipher = gcm_ops->enc;
                s->ops.update = gcm_ops->update_enc;
                s->ops.finalize = gcm_ops->finalize_enc;
                break;
        case AESNI_GCM_OP_AUTHENTICATED_DECRYPTION:
                s->ops.cipher = gcm_ops->dec;
                s->ops.update = gcm_ops->update_dec;
                s->ops.finalize = gcm_ops->finalize_dec;
                break;
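        /*
         * GMAC has no cipher path, and tag generation and verification
         * compute the same GHASH, so both ops share the encrypt-side
         * finalize handler.
         */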
        case AESNI_GMAC_OP_GENERATE:
        case AESNI_GMAC_OP_VERIFY:
                s->ops.finalize = gcm_ops->finalize_enc;
                break;
        }
}

/** Parse crypto xform chain and set private session parameters */
int
aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
                struct aesni_gcm_session *sess,
                const struct rte_crypto_sym_xform *xform)
{
        const struct rte_crypto_sym_xform *auth_xform;
        const struct rte_crypto_sym_xform *aead_xform;
        uint8_t key_length;
        const uint8_t *key;

        /* AES-GMAC */
        if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
                auth_xform = xform;
                if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC) {
                        AESNI_GCM_LOG(ERR, "Only AES GMAC is supported as an "
                                "authentication only algorithm");
                        return -ENOTSUP;
                }
                /* Set IV parameters */
                sess->iv.offset = auth_xform->auth.iv.offset;
                sess->iv.length = auth_xform->auth.iv.length;

                /* Select Crypto operation */
                if (auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
                        sess->op = AESNI_GMAC_OP_GENERATE;
                else
                        sess->op = AESNI_GMAC_OP_VERIFY;

                key_length = auth_xform->auth.key.length;
                key = auth_xform->auth.key.data;
                sess->req_digest_length = auth_xform->auth.digest_length;

        /* AES-GCM */
        } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
                aead_xform = xform;

                if (aead_xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM) {
                        AESNI_GCM_LOG(ERR, "The only combined operation "
                                                "supported is AES GCM");
                        return -ENOTSUP;
                }

                /* Set IV parameters */
                sess->iv.offset = aead_xform->aead.iv.offset;
                sess->iv.length = aead_xform->aead.iv.length;

                /* Select Crypto operation */
                if (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
                        sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
                /* op == RTE_CRYPTO_AEAD_OP_DECRYPT */
                else
                        sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;

                key_length = aead_xform->aead.key.length;
                key = aead_xform->aead.key.data;

                sess->aad_length = aead_xform->aead.aad_length;
                sess->req_digest_length = aead_xform->aead.digest_length;
        } else {
                AESNI_GCM_LOG(ERR, "Wrong xform type, has to be AEAD or authentication");
                return -ENOTSUP;
        }

        /* IV check */
        if (sess->iv.length != 16 && sess->iv.length != 12 &&
                        sess->iv.length != 0) {
                AESNI_GCM_LOG(ERR, "Wrong IV length");
                return -EINVAL;
        }

        /* Check key length and calculate GCM pre-compute. */
        switch (key_length) {
        case 16:
                sess->key = GCM_KEY_128;
                break;
        case 24:
                sess->key = GCM_KEY_192;
                break;
        case 32:
                sess->key = GCM_KEY_256;
                break;
        default:
                AESNI_GCM_LOG(ERR, "Invalid key length");
                return -EINVAL;
        }

        /* setup session handlers */
        set_func_ops(sess, &gcm_ops[sess->key]);

        /* pre-generate key */
        gcm_ops[sess->key].pre(key, &sess->gdata_key);

        /* Digest check */
        if (sess->req_digest_length > 16) {
                AESNI_GCM_LOG(ERR, "Invalid digest length");
                return -EINVAL;
        }
        /*
         * The multi-buffer lib supports digest sizes from 4 to 16 bytes
         * in version 0.50 and later, and sizes of 8, 12 and 16 bytes in
         * version 0.49.
         * If the requested size is different, generate the full digest
         * (16 bytes) in a temporary location and then memcpy the
         * requested number of bytes.
         */
#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
        if (sess->req_digest_length < 4)
#else
        if (sess->req_digest_length != 16 &&
                        sess->req_digest_length != 12 &&
                        sess->req_digest_length != 8)
#endif
                sess->gen_digest_length = 16;
        else
                sess->gen_digest_length = sess->req_digest_length;

        return 0;
}

/** Get GCM session */
static struct aesni_gcm_session *
aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_op *op)
{
        struct aesni_gcm_session *sess = NULL;
        struct rte_crypto_sym_op *sym_op = op->sym;

        if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
                if (likely(sym_op->session != NULL))
                        sess = (struct aesni_gcm_session *)
                                        get_sym_session_private_data(
                                        sym_op->session,
                                        cryptodev_driver_id);
        } else {
                void *_sess;
                void *_sess_private_data = NULL;

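                /*
                 * Session-less operation: take a session header and a
                 * private session object from the qp mempools and parse
                 * the per-op xform into it; both objects are returned
                 * to their pools once the op completes.
                 */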
                if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
                        return NULL;

                if (rte_mempool_get(qp->sess_mp_priv,
                                (void **)&_sess_private_data)) {
                        rte_mempool_put(qp->sess_mp, _sess);
                        return NULL;
                }

                sess = (struct aesni_gcm_session *)_sess_private_data;

                if (unlikely(aesni_gcm_set_session_parameters(qp->ops,
                                sess, sym_op->xform) != 0)) {
                        rte_mempool_put(qp->sess_mp, _sess);
                        rte_mempool_put(qp->sess_mp_priv, _sess_private_data);
                        sess = NULL;
                } else {
                        sym_op->session =
                                (struct rte_cryptodev_sym_session *)_sess;
                        set_sym_session_private_data(sym_op->session,
                                        cryptodev_driver_id,
                                        _sess_private_data);
                }
        }

        if (unlikely(sess == NULL))
                op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;

        return sess;
}

/**
 * Process a crypto operation, calling
 * the GCM API from the multi-buffer library.
 *
 * @param       qp              queue pair
 * @param       op              symmetric crypto operation
 * @param       session         GCM session
 *
 * @return
 * - 0 on success
 */
static int
process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
                struct aesni_gcm_session *session)
{
        uint8_t *src, *dst;
        uint8_t *iv_ptr;
        struct rte_crypto_sym_op *sym_op = op->sym;
        struct rte_mbuf *m_src = sym_op->m_src;
        uint32_t offset, data_offset, data_length;
        uint32_t part_len, total_len, data_len;
        uint8_t *tag;
        unsigned int oop = 0;

        if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION ||
                        session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
                offset = sym_op->aead.data.offset;
                data_offset = offset;
                data_length = sym_op->aead.data.length;
        } else {
                offset = sym_op->auth.data.offset;
                data_offset = offset;
                data_length = sym_op->auth.data.length;
        }

        RTE_ASSERT(m_src != NULL);

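        /*
         * Walk the segment chain until the segment that contains the
         * start of the crypto region is found, adjusting the offset to
         * be relative to that segment.
         */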
        while (offset >= m_src->data_len && data_length != 0) {
                offset -= m_src->data_len;
                m_src = m_src->next;

                RTE_ASSERT(m_src != NULL);
        }

        src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);

        data_len = m_src->data_len - offset;
        part_len = (data_len < data_length) ? data_len :
                        data_length;

        RTE_ASSERT((sym_op->m_dst == NULL) ||
                        ((sym_op->m_dst != NULL) &&
                                        rte_pktmbuf_is_contiguous(sym_op->m_dst)));

        /* In-place */
        if (sym_op->m_dst == NULL || (sym_op->m_dst == sym_op->m_src))
                dst = src;
        /* Out-of-place */
        else {
                oop = 1;
                /* A segmented destination buffer is not supported when
                 * the operation is out-of-place */
                RTE_ASSERT(rte_pktmbuf_is_contiguous(sym_op->m_dst));
                dst = rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
                                        data_offset);
        }
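        /*
         * From here on, src walks the (possibly segmented) source mbuf
         * chain, while dst either aliases src (in-place) or advances
         * linearly through the contiguous destination buffer.
         */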

        iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                                session->iv.offset);

        if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {
                qp->ops[session->key].init(&session->gdata_key,
                                &qp->gdata_ctx,
                                iv_ptr,
                                sym_op->aead.aad.data,
                                (uint64_t)session->aad_length);

                qp->ops[session->key].update_enc(&session->gdata_key,
                                &qp->gdata_ctx, dst, src,
                                (uint64_t)part_len);
                total_len = data_length - part_len;

                while (total_len) {
                        m_src = m_src->next;

                        RTE_ASSERT(m_src != NULL);

                        src = rte_pktmbuf_mtod(m_src, uint8_t *);
                        if (oop)
                                dst += part_len;
                        else
                                dst = src;
                        part_len = (m_src->data_len < total_len) ?
                                        m_src->data_len : total_len;

                        qp->ops[session->key].update_enc(&session->gdata_key,
                                        &qp->gdata_ctx, dst, src,
                                        (uint64_t)part_len);
                        total_len -= part_len;
                }

                if (session->req_digest_length != session->gen_digest_length)
                        tag = qp->temp_digest;
                else
                        tag = sym_op->aead.digest.data;

                qp->ops[session->key].finalize_enc(&session->gdata_key,
                                &qp->gdata_ctx,
                                tag,
                                session->gen_digest_length);
        } else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
                qp->ops[session->key].init(&session->gdata_key,
                                &qp->gdata_ctx,
                                iv_ptr,
                                sym_op->aead.aad.data,
                                (uint64_t)session->aad_length);

                qp->ops[session->key].update_dec(&session->gdata_key,
                                &qp->gdata_ctx, dst, src,
                                (uint64_t)part_len);
                total_len = data_length - part_len;

                while (total_len) {
                        m_src = m_src->next;

                        RTE_ASSERT(m_src != NULL);

                        src = rte_pktmbuf_mtod(m_src, uint8_t *);
                        if (oop)
                                dst += part_len;
                        else
                                dst = src;
                        part_len = (m_src->data_len < total_len) ?
                                        m_src->data_len : total_len;

                        qp->ops[session->key].update_dec(&session->gdata_key,
                                        &qp->gdata_ctx,
                                        dst, src,
                                        (uint64_t)part_len);
                        total_len -= part_len;
                }

                tag = qp->temp_digest;
                qp->ops[session->key].finalize_dec(&session->gdata_key,
                                &qp->gdata_ctx,
                                tag,
                                session->gen_digest_length);
#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
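        /* IPsec-mb releases newer than 0.54 expose a dedicated GMAC API. */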
        } else if (session->op == AESNI_GMAC_OP_GENERATE) {
                qp->ops[session->key].gmac_init(&session->gdata_key,
                                &qp->gdata_ctx,
                                iv_ptr,
                                session->iv.length);

                qp->ops[session->key].gmac_update(&session->gdata_key,
                                &qp->gdata_ctx, src,
                                (uint64_t)part_len);
                total_len = data_length - part_len;

                while (total_len) {
                        m_src = m_src->next;

                        RTE_ASSERT(m_src != NULL);

                        src = rte_pktmbuf_mtod(m_src, uint8_t *);
                        part_len = (m_src->data_len < total_len) ?
                                        m_src->data_len : total_len;

                        qp->ops[session->key].gmac_update(&session->gdata_key,
                                        &qp->gdata_ctx, src,
                                        (uint64_t)part_len);
                        total_len -= part_len;
                }

                if (session->req_digest_length != session->gen_digest_length)
                        tag = qp->temp_digest;
                else
                        tag = sym_op->auth.digest.data;

                qp->ops[session->key].gmac_finalize(&session->gdata_key,
                                &qp->gdata_ctx,
                                tag,
                                session->gen_digest_length);
        } else { /* AESNI_GMAC_OP_VERIFY */
                qp->ops[session->key].gmac_init(&session->gdata_key,
                                &qp->gdata_ctx,
                                iv_ptr,
                                session->iv.length);

                qp->ops[session->key].gmac_update(&session->gdata_key,
                                &qp->gdata_ctx, src,
                                (uint64_t)part_len);
                total_len = data_length - part_len;

                while (total_len) {
                        m_src = m_src->next;

                        RTE_ASSERT(m_src != NULL);

                        src = rte_pktmbuf_mtod(m_src, uint8_t *);
                        part_len = (m_src->data_len < total_len) ?
                                        m_src->data_len : total_len;

                        qp->ops[session->key].gmac_update(&session->gdata_key,
                                        &qp->gdata_ctx, src,
                                        (uint64_t)part_len);
                        total_len -= part_len;
                }

                tag = qp->temp_digest;

                qp->ops[session->key].gmac_finalize(&session->gdata_key,
                                &qp->gdata_ctx,
                                tag,
                                session->gen_digest_length);
        }
#else
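        /*
         * Older IPsec-mb releases have no dedicated GMAC API: GMAC is
         * emulated through GCM by passing the message to init() as AAD
         * (with zero-length cipher text), so only the tag is produced.
         */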
        } else if (session->op == AESNI_GMAC_OP_GENERATE) {
                qp->ops[session->key].init(&session->gdata_key,
                                &qp->gdata_ctx,
                                iv_ptr,
                                src,
                                (uint64_t)data_length);
                if (session->req_digest_length != session->gen_digest_length)
                        tag = qp->temp_digest;
                else
                        tag = sym_op->auth.digest.data;
                qp->ops[session->key].finalize_enc(&session->gdata_key,
                                &qp->gdata_ctx,
                                tag,
                                session->gen_digest_length);
        } else { /* AESNI_GMAC_OP_VERIFY */
                qp->ops[session->key].init(&session->gdata_key,
                                &qp->gdata_ctx,
                                iv_ptr,
                                src,
                                (uint64_t)data_length);

                /*
                 * Always generate the tag into the temporary buffer;
                 * only the requested number of bytes is compared later.
                 */
                tag = qp->temp_digest;
                qp->ops[session->key].finalize_enc(&session->gdata_key,
                                &qp->gdata_ctx,
                                tag,
                                session->gen_digest_length);
        }
#endif

        return 0;
}

static inline void
aesni_gcm_fill_error_code(struct rte_crypto_sym_vec *vec, int32_t errnum)
{
        uint32_t i;

        for (i = 0; i < vec->num; i++)
                vec->status[i] = errnum;
}

static inline int32_t
aesni_gcm_sgl_op_finalize_encryption(const struct aesni_gcm_session *s,
        struct gcm_context_data *gdata_ctx, uint8_t *digest)
{
        if (s->req_digest_length != s->gen_digest_length) {
                uint8_t tmpdigest[s->gen_digest_length];

                s->ops.finalize(&s->gdata_key, gdata_ctx, tmpdigest,
                        s->gen_digest_length);
                memcpy(digest, tmpdigest, s->req_digest_length);
        } else {
                s->ops.finalize(&s->gdata_key, gdata_ctx, digest,
                        s->gen_digest_length);
        }

        return 0;
}

static inline int32_t
aesni_gcm_sgl_op_finalize_decryption(const struct aesni_gcm_session *s,
        struct gcm_context_data *gdata_ctx, uint8_t *digest)
{
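        /*
         * Generate the tag into a temporary buffer and compare only the
         * requested number of bytes against the caller-supplied digest.
         */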
        uint8_t tmpdigest[s->gen_digest_length];

        s->ops.finalize(&s->gdata_key, gdata_ctx, tmpdigest,
                s->gen_digest_length);

        return memcmp(digest, tmpdigest, s->req_digest_length) == 0 ? 0 :
                EBADMSG;
}

static inline void
aesni_gcm_process_gcm_sgl_op(const struct aesni_gcm_session *s,
        struct gcm_context_data *gdata_ctx, struct rte_crypto_sgl *sgl,
        void *iv, void *aad)
{
        uint32_t i;

        /* init crypto operation */
        s->ops.init(&s->gdata_key, gdata_ctx, iv, aad,
                (uint64_t)s->aad_length);

        /* update with sgl data */
        for (i = 0; i < sgl->num; i++) {
                struct rte_crypto_vec *vec = &sgl->vec[i];

                s->ops.update(&s->gdata_key, gdata_ctx, vec->base, vec->base,
                        vec->len);
        }
}

static inline void
aesni_gcm_process_gmac_sgl_op(const struct aesni_gcm_session *s,
        struct gcm_context_data *gdata_ctx, struct rte_crypto_sgl *sgl,
        void *iv)
{
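        /*
         * GMAC on the CPU crypto path: the single data segment is fed
         * to init() as AAD; the tag itself is produced by the caller's
         * finalize step.
         */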
        s->ops.init(&s->gdata_key, gdata_ctx, iv, sgl->vec[0].base,
                sgl->vec[0].len);
}

static inline uint32_t
aesni_gcm_sgl_encrypt(struct aesni_gcm_session *s,
        struct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)
{
        uint32_t i, processed;

        processed = 0;
        for (i = 0; i < vec->num; ++i) {
                aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
                        &vec->sgl[i], vec->iv[i].va,
                        vec->aad[i].va);
                vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
                        gdata_ctx, vec->digest[i].va);
                processed += (vec->status[i] == 0);
        }

        return processed;
}

static inline uint32_t
aesni_gcm_sgl_decrypt(struct aesni_gcm_session *s,
        struct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)
{
        uint32_t i, processed;

        processed = 0;
        for (i = 0; i < vec->num; ++i) {
                aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
                        &vec->sgl[i], vec->iv[i].va,
                        vec->aad[i].va);
                vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
                        gdata_ctx, vec->digest[i].va);
                processed += (vec->status[i] == 0);
        }

        return processed;
}

static inline uint32_t
aesni_gmac_sgl_generate(struct aesni_gcm_session *s,
        struct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)
{
        uint32_t i, processed;

        processed = 0;
        for (i = 0; i < vec->num; ++i) {
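                /* The init()-based GMAC path only handles a single segment. */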
                if (vec->sgl[i].num != 1) {
                        vec->status[i] = ENOTSUP;
                        continue;
                }

                aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
                        &vec->sgl[i], vec->iv[i].va);
                vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
                        gdata_ctx, vec->digest[i].va);
                processed += (vec->status[i] == 0);
        }

        return processed;
}

static inline uint32_t
aesni_gmac_sgl_verify(struct aesni_gcm_session *s,
        struct gcm_context_data *gdata_ctx, struct rte_crypto_sym_vec *vec)
{
        uint32_t i, processed;

        processed = 0;
        for (i = 0; i < vec->num; ++i) {
                if (vec->sgl[i].num != 1) {
                        vec->status[i] = ENOTSUP;
                        continue;
                }

                aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
                        &vec->sgl[i], vec->iv[i].va);
                vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
                        gdata_ctx, vec->digest[i].va);
                processed += (vec->status[i] == 0);
        }

        return processed;
}

/** Process CPU crypto bulk operations */
uint32_t
aesni_gcm_pmd_cpu_crypto_process(struct rte_cryptodev *dev,
        struct rte_cryptodev_sym_session *sess,
        __rte_unused union rte_crypto_sym_ofs ofs,
        struct rte_crypto_sym_vec *vec)
{
        void *sess_priv;
        struct aesni_gcm_session *s;
        struct gcm_context_data gdata_ctx;

        sess_priv = get_sym_session_private_data(sess, dev->driver_id);
        if (unlikely(sess_priv == NULL)) {
                aesni_gcm_fill_error_code(vec, EINVAL);
                return 0;
        }

        s = sess_priv;
        switch (s->op) {
        case AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION:
                return aesni_gcm_sgl_encrypt(s, &gdata_ctx, vec);
        case AESNI_GCM_OP_AUTHENTICATED_DECRYPTION:
                return aesni_gcm_sgl_decrypt(s, &gdata_ctx, vec);
        case AESNI_GMAC_OP_GENERATE:
                return aesni_gmac_sgl_generate(s, &gdata_ctx, vec);
        case AESNI_GMAC_OP_VERIFY:
                return aesni_gmac_sgl_verify(s, &gdata_ctx, vec);
        default:
                aesni_gcm_fill_error_code(vec, EINVAL);
                return 0;
        }
}

/**
 * Post-process a completed GCM crypto operation: verify the digest for
 * decrypt/verify operations, or copy out a truncated digest for
 * generate operations, and set the operation status accordingly.
 *
 * @param qp            Queue pair the operation was processed on
 * @param op            Crypto operation to post-process
 * @param session       GCM session used for the operation
 */
static void
post_process_gcm_crypto_op(struct aesni_gcm_qp *qp,
                struct rte_crypto_op *op,
                struct aesni_gcm_session *session)
{
        op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

        /* Verify digest if required */
        if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION ||
                        session->op == AESNI_GMAC_OP_VERIFY) {
                uint8_t *digest;
                uint8_t *tag = qp->temp_digest;

                if (session->op == AESNI_GMAC_OP_VERIFY)
                        digest = op->sym->auth.digest.data;
                else
                        digest = op->sym->aead.digest.data;

#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
                rte_hexdump(stdout, "auth tag (orig):",
                                digest, session->req_digest_length);
                rte_hexdump(stdout, "auth tag (calc):",
                                tag, session->req_digest_length);
#endif

                if (memcmp(tag, digest, session->req_digest_length) != 0)
                        op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
        } else {
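                /*
                 * Tag generation: if a truncated digest was requested,
                 * the full tag was written to temp_digest, so copy only
                 * the requested number of bytes into the user buffer.
                 */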
                if (session->req_digest_length != session->gen_digest_length) {
                        if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION)
                                memcpy(op->sym->aead.digest.data, qp->temp_digest,
                                                session->req_digest_length);
                        else
                                memcpy(op->sym->auth.digest.data, qp->temp_digest,
                                                session->req_digest_length);
                }
        }
}

/**
 * Handle a completed GCM request: post-process the operation and, if it
 * was session-less, return the session objects to their mempools.
 *
 * @param qp            Queue pair the operation was processed on
 * @param op            Crypto operation
 * @param sess          GCM session used for the operation
 */
static void
handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
                struct rte_crypto_op *op,
                struct aesni_gcm_session *sess)
{
        post_process_gcm_crypto_op(qp, op, sess);

        /* Free session if a session-less crypto op */
        if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
                memset(sess, 0, sizeof(struct aesni_gcm_session));
                memset(op->sym->session, 0,
                        rte_cryptodev_sym_get_existing_header_session_size(
                                op->sym->session));
                rte_mempool_put(qp->sess_mp_priv, sess);
                rte_mempool_put(qp->sess_mp, op->sym->session);
                op->sym->session = NULL;
        }
}

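/*
 * Note: this PMD does the actual crypto work at dequeue time. The
 * enqueue burst only places ops on the qp ring; each op is run through
 * the GCM code of the multi-buffer library as it is pulled off here.
 */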
static uint16_t
aesni_gcm_pmd_dequeue_burst(void *queue_pair,
                struct rte_crypto_op **ops, uint16_t nb_ops)
{
        struct aesni_gcm_session *sess;
        struct aesni_gcm_qp *qp = queue_pair;

        int retval = 0;
        unsigned int i, nb_dequeued;

        nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
                        (void **)ops, nb_ops, NULL);

        for (i = 0; i < nb_dequeued; i++) {
                sess = aesni_gcm_get_session(qp, ops[i]);
                if (unlikely(sess == NULL)) {
                        ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
                        qp->qp_stats.dequeue_err_count++;
                        break;
                }

                retval = process_gcm_crypto_op(qp, ops[i], sess);
                if (retval < 0) {
                        ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
                        qp->qp_stats.dequeue_err_count++;
                        break;
                }

                handle_completed_gcm_crypto_op(qp, ops[i], sess);
        }

        qp->qp_stats.dequeued_count += i;

        return i;
}

static uint16_t
aesni_gcm_pmd_enqueue_burst(void *queue_pair,
                struct rte_crypto_op **ops, uint16_t nb_ops)
{
        struct aesni_gcm_qp *qp = queue_pair;

        unsigned int nb_enqueued;

        nb_enqueued = rte_ring_enqueue_burst(qp->processed_pkts,
                        (void **)ops, nb_ops, NULL);
        qp->qp_stats.enqueued_count += nb_enqueued;

        return nb_enqueued;
}

static int aesni_gcm_remove(struct rte_vdev_device *vdev);

static int
aesni_gcm_create(const char *name,
                struct rte_vdev_device *vdev,
                struct rte_cryptodev_pmd_init_params *init_params)
{
        struct rte_cryptodev *dev;
        struct aesni_gcm_private *internals;
        enum aesni_gcm_vector_mode vector_mode;
        MB_MGR *mb_mgr;

        dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
        if (dev == NULL) {
                AESNI_GCM_LOG(ERR, "driver %s: create failed",
                        init_params->name);
                return -ENODEV;
        }

        /* Check CPU for supported vector instruction set */
        if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
                vector_mode = RTE_AESNI_GCM_AVX512;
        else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
                vector_mode = RTE_AESNI_GCM_AVX2;
        else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
                vector_mode = RTE_AESNI_GCM_AVX;
        else
                vector_mode = RTE_AESNI_GCM_SSE;

        dev->driver_id = cryptodev_driver_id;
        dev->dev_ops = rte_aesni_gcm_pmd_ops;

        /* register rx/tx burst functions for data path */
        dev->dequeue_burst = aesni_gcm_pmd_dequeue_burst;
        dev->enqueue_burst = aesni_gcm_pmd_enqueue_burst;

        dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
                        RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
                        RTE_CRYPTODEV_FF_IN_PLACE_SGL |
                        RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
                        RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
                        RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO |
                        RTE_CRYPTODEV_FF_SYM_SESSIONLESS;

        /* Check CPU for support for AES instruction set */
        if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES))
                dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AESNI;
        else
                AESNI_GCM_LOG(WARNING, "AES instructions not supported by CPU");

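        /*
         * The multi-buffer manager carries the GCM/GMAC function
         * pointers for the vector mode selected above; they are copied
         * into internals->ops below, keyed by key size.
         */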
        mb_mgr = alloc_mb_mgr(0);
        if (mb_mgr == NULL) {
                rte_cryptodev_pmd_destroy(dev);
                return -ENOMEM;
        }

        switch (vector_mode) {
        case RTE_AESNI_GCM_SSE:
                dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
                init_mb_mgr_sse(mb_mgr);
                break;
        case RTE_AESNI_GCM_AVX:
                dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
                init_mb_mgr_avx(mb_mgr);
                break;
        case RTE_AESNI_GCM_AVX2:
                dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
                init_mb_mgr_avx2(mb_mgr);
                break;
        case RTE_AESNI_GCM_AVX512:
                dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
                init_mb_mgr_avx512(mb_mgr);
                break;
        default:
                AESNI_GCM_LOG(ERR, "Unsupported vector mode %u", vector_mode);
                goto error_exit;
        }

        internals = dev->data->dev_private;

        internals->vector_mode = vector_mode;
        internals->mb_mgr = mb_mgr;

        /* Set arch independent function pointers, based on key size */
        internals->ops[GCM_KEY_128].enc = mb_mgr->gcm128_enc;
        internals->ops[GCM_KEY_128].dec = mb_mgr->gcm128_dec;
        internals->ops[GCM_KEY_128].pre = mb_mgr->gcm128_pre;
        internals->ops[GCM_KEY_128].init = mb_mgr->gcm128_init;
        internals->ops[GCM_KEY_128].update_enc = mb_mgr->gcm128_enc_update;
        internals->ops[GCM_KEY_128].update_dec = mb_mgr->gcm128_dec_update;
        internals->ops[GCM_KEY_128].finalize_enc = mb_mgr->gcm128_enc_finalize;
        internals->ops[GCM_KEY_128].finalize_dec = mb_mgr->gcm128_dec_finalize;
#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
        internals->ops[GCM_KEY_128].gmac_init = mb_mgr->gmac128_init;
        internals->ops[GCM_KEY_128].gmac_update = mb_mgr->gmac128_update;
        internals->ops[GCM_KEY_128].gmac_finalize = mb_mgr->gmac128_finalize;
#endif

        internals->ops[GCM_KEY_192].enc = mb_mgr->gcm192_enc;
        internals->ops[GCM_KEY_192].dec = mb_mgr->gcm192_dec;
        internals->ops[GCM_KEY_192].pre = mb_mgr->gcm192_pre;
        internals->ops[GCM_KEY_192].init = mb_mgr->gcm192_init;
        internals->ops[GCM_KEY_192].update_enc = mb_mgr->gcm192_enc_update;
        internals->ops[GCM_KEY_192].update_dec = mb_mgr->gcm192_dec_update;
        internals->ops[GCM_KEY_192].finalize_enc = mb_mgr->gcm192_enc_finalize;
        internals->ops[GCM_KEY_192].finalize_dec = mb_mgr->gcm192_dec_finalize;
#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
        internals->ops[GCM_KEY_192].gmac_init = mb_mgr->gmac192_init;
        internals->ops[GCM_KEY_192].gmac_update = mb_mgr->gmac192_update;
        internals->ops[GCM_KEY_192].gmac_finalize = mb_mgr->gmac192_finalize;
#endif

        internals->ops[GCM_KEY_256].enc = mb_mgr->gcm256_enc;
        internals->ops[GCM_KEY_256].dec = mb_mgr->gcm256_dec;
        internals->ops[GCM_KEY_256].pre = mb_mgr->gcm256_pre;
        internals->ops[GCM_KEY_256].init = mb_mgr->gcm256_init;
        internals->ops[GCM_KEY_256].update_enc = mb_mgr->gcm256_enc_update;
        internals->ops[GCM_KEY_256].update_dec = mb_mgr->gcm256_dec_update;
        internals->ops[GCM_KEY_256].finalize_enc = mb_mgr->gcm256_enc_finalize;
        internals->ops[GCM_KEY_256].finalize_dec = mb_mgr->gcm256_dec_finalize;
#if IMB_VERSION(0, 54, 0) < IMB_VERSION_NUM
        internals->ops[GCM_KEY_256].gmac_init = mb_mgr->gmac256_init;
        internals->ops[GCM_KEY_256].gmac_update = mb_mgr->gmac256_update;
        internals->ops[GCM_KEY_256].gmac_finalize = mb_mgr->gmac256_finalize;
#endif

        internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;

#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
        AESNI_GCM_LOG(INFO, "IPsec Multi-buffer library version used: %s",
                        imb_get_version_str());
#else
        AESNI_GCM_LOG(INFO, "IPsec Multi-buffer library version used: 0.49.0");
#endif

        return 0;

error_exit:
        if (mb_mgr)
                free_mb_mgr(mb_mgr);

        rte_cryptodev_pmd_destroy(dev);

        return -1;
}

static int
aesni_gcm_probe(struct rte_vdev_device *vdev)
{
        struct rte_cryptodev_pmd_init_params init_params = {
                "",
                sizeof(struct aesni_gcm_private),
                rte_socket_id(),
                RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
        };
        const char *name;
        const char *input_args;

        name = rte_vdev_device_name(vdev);
        if (name == NULL)
                return -EINVAL;
        input_args = rte_vdev_device_args(vdev);
        rte_cryptodev_pmd_parse_input_args(&init_params, input_args);

        return aesni_gcm_create(name, vdev, &init_params);
}

static int
aesni_gcm_remove(struct rte_vdev_device *vdev)
{
        struct rte_cryptodev *cryptodev;
        struct aesni_gcm_private *internals;
        const char *name;

        name = rte_vdev_device_name(vdev);
        if (name == NULL)
                return -EINVAL;

        cryptodev = rte_cryptodev_pmd_get_named_dev(name);
        if (cryptodev == NULL)
                return -ENODEV;

        internals = cryptodev->data->dev_private;

        free_mb_mgr(internals->mb_mgr);

        return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_vdev_driver aesni_gcm_pmd_drv = {
        .probe = aesni_gcm_probe,
        .remove = aesni_gcm_remove
};

static struct cryptodev_driver aesni_gcm_crypto_drv;

RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_GCM_PMD, aesni_gcm_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_GCM_PMD, cryptodev_aesni_gcm_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
        "max_nb_queue_pairs=<int> "
        "socket_id=<int>");
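/*
 * Example EAL usage (hypothetical instance name and values): the PMD
 * can be instantiated at startup with, e.g.:
 *   --vdev "crypto_aesni_gcm0,max_nb_queue_pairs=2,socket_id=0"
 */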
RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_crypto_drv, aesni_gcm_pmd_drv.driver,
                cryptodev_driver_id);
RTE_LOG_REGISTER(aesni_gcm_logtype_driver, pmd.crypto.aesni_gcm, NOTICE);