linux/drivers/crypto/nx/nx-aes-ccm.c
/**
 * AES CCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"

static int ccm_aes_nx_set_key(struct crypto_aead *tfm,
                              const u8           *in_key,
                              unsigned int        key_len)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

        nx_ctx_init(nx_ctx, HCOP_FC_AES);

        switch (key_len) {
        case AES_KEYSIZE_128:
                NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
                NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
                nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
                break;
        default:
                return -EINVAL;
        }

        csbcpb->cpb.hdr.mode = NX_MODE_AES_CCM;
        memcpy(csbcpb->cpb.aes_ccm.key, in_key, key_len);

        csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_CCA;
        memcpy(csbcpb_aead->cpb.aes_cca.key, in_key, key_len);

        return 0;
}

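/*
 * For rfc4309(ccm(aes)), RFC 4309 defines the keying material as the AES
 * key followed by a 3-byte salt.  The salt is stashed in the context here
 * and later forms the leading bytes of the 11-byte CCM nonce assembled in
 * ccm4309_aes_nx_encrypt()/_decrypt().
 */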
static int ccm4309_aes_nx_set_key(struct crypto_aead *tfm,
                                  const u8           *in_key,
                                  unsigned int        key_len)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);

        if (key_len < 3)
                return -EINVAL;

        key_len -= 3;

        memcpy(nx_ctx->priv.ccm.nonce, in_key + key_len, 3);

        return ccm_aes_nx_set_key(tfm, in_key, key_len);
}

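/*
 * CCM allows tag (MAC) lengths of 4, 6, 8, 10, 12, 14 or 16 bytes
 * (NIST SP 800-38C, RFC 3610); anything else is rejected.
 */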
static int ccm_aes_nx_setauthsize(struct crypto_aead *tfm,
                                  unsigned int authsize)
{
        switch (authsize) {
        case 4:
        case 6:
        case 8:
        case 10:
        case 12:
        case 14:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        crypto_aead_crt(tfm)->authsize = authsize;

        return 0;
}

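/*
 * RFC 4309 further restricts the ICV (tag) length for ESP to 8, 12 or
 * 16 bytes.
 */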
static int ccm4309_aes_nx_setauthsize(struct crypto_aead *tfm,
                                      unsigned int authsize)
{
        switch (authsize) {
        case 8:
        case 12:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        crypto_aead_crt(tfm)->authsize = authsize;

        return 0;
}

/* taken from crypto/ccm.c */
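/*
 * set_msg_len() zeroes a csize-byte length field and writes msglen into its
 * last min(csize, 4) bytes, most-significant byte first.  For example, with
 * csize == 4 and msglen == 0x012345 the field ends up as 00 01 23 45.  For
 * csize < 4, a msglen larger than 2^(8 * csize) is rejected with
 * -EOVERFLOW.
 */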
static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
        __be32 data;

        memset(block, 0, csize);
        block += csize;

        if (csize >= 4)
                csize = 4;
        else if (msglen > (unsigned int)(1 << (8 * csize)))
                return -EOVERFLOW;

        data = cpu_to_be32(msglen);
        memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

        return 0;
}

/* taken from crypto/ccm.c */
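/*
 * The first IV byte holds L' = L - 1, where L is the size in bytes of the
 * length field; 15 - L bytes remain for the nonce.  For example, the
 * rfc4309 entry points below use iv[0] = 3, i.e. a 4-byte length field and
 * an 11-byte nonce (3-byte salt + 8-byte per-request IV).
 */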
static inline int crypto_ccm_check_iv(const u8 *iv)
{
        /* 2 <= L <= 8, so 1 <= L' <= 7. */
        if (1 > iv[0] || iv[0] > 7)
                return -EINVAL;

        return 0;
}

/* based on code from crypto/ccm.c */
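/*
 * B0 starts out as a copy of the counter block, so bits 0-2 of the flags
 * byte already hold L'.  generate_b0() then ORs in the encoded tag length
 * ((m - 2) / 2 in bits 3-5) and the Adata flag (bit 6) per NIST SP 800-38C.
 * For example, authsize m = 8, assoclen > 0 and iv[0] = 3 give a flags byte
 * of 0x40 | 0x18 | 0x03 = 0x5b.
 */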
static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize,
                       unsigned int cryptlen, u8 *b0)
{
        unsigned int l, lp, m = authsize;
        int rc;

        memcpy(b0, iv, 16);

        lp = b0[0];
        l = lp + 1;

        /* set m, bits 3-5 */
        *b0 |= (8 * ((m - 2) / 2));

        /* set adata, bit 6, if associated data is used */
        if (assoclen)
                *b0 |= 64;

        rc = set_msg_len(b0 + 16 - l, cryptlen, l);

        return rc;
}

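/*
 * generate_pat() builds the initial partial authentication tag (PAT) input:
 * B0, an optional B1 and the associated data.  Short AAD (14 bytes or less)
 * is packed straight into B1 and hashed with a single CCM operation; longer
 * AAD is fed to the CCA engine in one or more passes, chaining the
 * intermediate B0 result between calls.  The 16-byte result lands in 'out'
 * (the CCM csbcpb's in_pat_or_b0), ready for the encrypt/decrypt phase.
 */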
static int generate_pat(u8                   *iv,
                        struct aead_request  *req,
                        struct nx_crypto_ctx *nx_ctx,
                        unsigned int          authsize,
                        unsigned int          nbytes,
                        u8                   *out)
{
        struct nx_sg *nx_insg = nx_ctx->in_sg;
        struct nx_sg *nx_outsg = nx_ctx->out_sg;
        unsigned int iauth_len = 0;
        u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL;
        int rc;
        unsigned int max_sg_len;

        /* zero the ctr value */
        memset(iv + 15 - iv[0], 0, iv[0] + 1);

        /* page 78 of nx_wb.pdf notes:
         *   RFC 3610 allows the AAD to be up to 2^64 - 1 bytes long.  If a
         *   full message is used, the AES CCA implementation restricts the
         *   maximum AAD length to 2^32 - 1 bytes.  If partial messages are
         *   used, the implementation supports an AAD length of up to
         *   2^64 - 1 bytes.
         *
         * However, in the crypto API's aead_request structure, assoclen is
         * an unsigned int and therefore cannot hold a value greater than
         * 2^32 - 1, so the AAD handled here never exceeds that limit.
         */

        if (!req->assoclen) {
                b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
        } else if (req->assoclen <= 14) {
                /* if associated data is 14 bytes or less, we do one CCM
                 * operation on 2 AES blocks, B0 (stored in the csbcpb) and
                 * B1, which is fed in through the source buffers here */
                b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
                b1 = nx_ctx->priv.ccm.iauth_tag;
                iauth_len = req->assoclen;
        } else if (req->assoclen < 65280) {
                /* if associated data is less than (2^16 - 2^8) bytes, we
                 * construct B1 differently and feed the associated data
                 * into a CCA operation */
                b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
                b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
                iauth_len = 14;
        } else {
                b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
                b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
                iauth_len = 10;
        }

        /* generate B0 */
        rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0);
        if (rc)
                return rc;

        /* generate B1:
         * add control info for associated data
         * RFC 3610 and NIST Special Publication 800-38C
         */
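        /* The AAD length is encoded at the front of B1 exactly as RFC 3610
         * section 2.2 describes: a 2-byte big-endian length when assoclen
         * is below 2^16 - 2^8 (AAD bytes follow at b1[2]), otherwise the
         * marker 0xff 0xfe followed by a 4-byte big-endian length (AAD
         * bytes follow at b1[6]).  The plain u16/u32 stores below depend on
         * a big-endian CPU to produce that byte order.
         */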
        if (b1) {
                memset(b1, 0, 16);
                if (req->assoclen < 65280) {
                        *(u16 *)b1 = (u16)req->assoclen;
                        scatterwalk_map_and_copy(b1 + 2, req->assoc, 0,
                                         iauth_len, SCATTERWALK_FROM_SG);
                } else {
                        *(u16 *)b1 = (u16)(0xfffe);
                        *(u32 *)&b1[2] = (u32)req->assoclen;
                        scatterwalk_map_and_copy(b1 + 6, req->assoc, 0,
                                         iauth_len, SCATTERWALK_FROM_SG);
                }
        }

        /* now copy any remaining AAD to scatterlist and call nx... */
        if (!req->assoclen) {
                return rc;
        } else if (req->assoclen <= 14) {
                unsigned int len = 16;

                nx_insg = nx_build_sg_list(nx_insg, b1, &len, nx_ctx->ap->sglen);

                if (len != 16)
                        return -EINVAL;

                nx_outsg = nx_build_sg_list(nx_outsg, tmp, &len,
                                            nx_ctx->ap->sglen);

                if (len != 16)
                        return -EINVAL;

                /* inlen should be negative, indicating to phyp that it's a
                 * pointer to an sg list */
                nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) *
                                        sizeof(struct nx_sg);
                nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) *
                                        sizeof(struct nx_sg);

                NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT;
                NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE;

                result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac;

                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                                   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
                if (rc)
                        return rc;

                atomic_inc(&(nx_ctx->stats->aes_ops));
                atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));

        } else {
                unsigned int processed = 0, to_process;

                processed += iauth_len;

                /* page_limit: number of sg entries that fit on one page */
                max_sg_len = min_t(u64, nx_ctx->ap->sglen,
                                nx_driver.of.max_sg_len/sizeof(struct nx_sg));
                max_sg_len = min_t(u64, max_sg_len,
                                nx_ctx->ap->databytelen/NX_PAGE_SIZE);

                do {
                        to_process = min_t(u32, req->assoclen - processed,
                                           nx_ctx->ap->databytelen);

                        nx_insg = nx_walk_and_build(nx_ctx->in_sg,
                                                    nx_ctx->ap->sglen,
                                                    req->assoc, processed,
                                                    &to_process);

                        if ((to_process + processed) < req->assoclen) {
                                NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
                                        NX_FDM_INTERMEDIATE;
                        } else {
                                NX_CPB_FDM(nx_ctx->csbcpb_aead) &=
                                        ~NX_FDM_INTERMEDIATE;
                        }

                        nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
                                                sizeof(struct nx_sg);

                        result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;

                        rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
                                   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
                        if (rc)
                                return rc;

                        memcpy(nx_ctx->csbcpb_aead->cpb.aes_cca.b0,
                                nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0,
                                AES_BLOCK_SIZE);

                        NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION;

                        atomic_inc(&(nx_ctx->stats->aes_ops));
                        atomic64_add(req->assoclen,
                                        &(nx_ctx->stats->aes_bytes));

                        processed += to_process;
                } while (processed < req->assoclen);

                result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
        }

        memcpy(out, result, AES_BLOCK_SIZE);

        return rc;
}

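/*
 * ccm_nx_decrypt(): strip the tag off the end of the source, seed the PAT
 * with generate_pat(), then walk the ciphertext in chunks sized by
 * nx_build_sg_lists().  The counter, PAT and S0 produced by each hcall are
 * fed back in for the next chunk (NX_FDM_CONTINUATION).  Once everything
 * has been processed, the computed MAC is compared against the tag copied
 * out of the request, and -EBADMSG is returned on mismatch.
 */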
static int ccm_nx_decrypt(struct aead_request   *req,
                          struct blkcipher_desc *desc)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        unsigned int nbytes = req->cryptlen;
        unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
        struct nx_ccm_priv *priv = &nx_ctx->priv.ccm;
        unsigned long irq_flags;
        unsigned int processed = 0, to_process;
        int rc = -1;

        spin_lock_irqsave(&nx_ctx->lock, irq_flags);

        nbytes -= authsize;

        /* copy out the auth tag to compare with later */
        scatterwalk_map_and_copy(priv->oauth_tag,
                                 req->src, nbytes, authsize,
                                 SCATTERWALK_FROM_SG);

        rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
                          csbcpb->cpb.aes_ccm.in_pat_or_b0);
        if (rc)
                goto out;

        do {

                /* to_process: the data chunk to process in this update.
                 * This value is bound by sg list limits.
                 */
                to_process = nbytes - processed;

                if ((to_process + processed) < nbytes)
                        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
                else
                        NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

                NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

                rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
                                        &to_process, processed,
                                        csbcpb->cpb.aes_ccm.iv_or_ctr);
                if (rc)
                        goto out;

                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                           req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
                if (rc)
                        goto out;

                /* for partial completion, copy the counter, PAT and S0
                 * forward for the next pass through the loop
                 */
                memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
                memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
                        csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
                memcpy(csbcpb->cpb.aes_ccm.in_s0,
                        csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

                NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

                /* update stats */
                atomic_inc(&(nx_ctx->stats->aes_ops));
                atomic64_add(csbcpb->csb.processed_byte_count,
                             &(nx_ctx->stats->aes_bytes));

                processed += to_process;
        } while (processed < nbytes);

        rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
                    authsize) ? -EBADMSG : 0;
out:
        spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
}

static int ccm_nx_encrypt(struct aead_request   *req,
                          struct blkcipher_desc *desc)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
        unsigned int nbytes = req->cryptlen;
        unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
        unsigned long irq_flags;
        unsigned int processed = 0, to_process;
        int rc = -1;

        spin_lock_irqsave(&nx_ctx->lock, irq_flags);

        rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
                          csbcpb->cpb.aes_ccm.in_pat_or_b0);
        if (rc)
                goto out;

        do {
                /* to_process: the data chunk to process in this update.
                 * This value is bound by sg list limits.
                 */
                to_process = nbytes - processed;

                if ((to_process + processed) < nbytes)
                        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
                else
                        NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

                NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;

                rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
                                        &to_process, processed,
                                        csbcpb->cpb.aes_ccm.iv_or_ctr);
                if (rc)
                        goto out;

                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
                                   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
                if (rc)
                        goto out;

                /* for partial completion, copy the counter, PAT and S0
                 * forward for the next pass through the loop
                 */
                memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
                memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
                        csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
                memcpy(csbcpb->cpb.aes_ccm.in_s0,
                        csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

                NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

                /* update stats */
                atomic_inc(&(nx_ctx->stats->aes_ops));
                atomic64_add(csbcpb->csb.processed_byte_count,
                             &(nx_ctx->stats->aes_bytes));

                processed += to_process;

        } while (processed < nbytes);

        /* copy out the auth tag */
        scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
                                 req->dst, nbytes, authsize,
                                 SCATTERWALK_TO_SG);

out:
        spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
        return rc;
}

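/*
 * For the rfc4309 variants the 16-byte counter block is assembled here
 * rather than taken directly from the request: iv[0] = 3 selects a 4-byte
 * length field (L' = 3), iv[1..3] carry the salt saved at setkey time and
 * iv[4..11] carry the 8-byte per-request IV, forming the 11-byte nonce
 * that RFC 4309 requires.  The remaining four bytes are the counter, which
 * generate_pat() zeroes before use.
 */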
static int ccm4309_aes_nx_encrypt(struct aead_request *req)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        struct blkcipher_desc desc;
        u8 *iv = nx_ctx->priv.ccm.iv;

        iv[0] = 3;
        memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
        memcpy(iv + 4, req->iv, 8);

        desc.info = iv;
        desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

        return ccm_nx_encrypt(req, &desc);
}

static int ccm_aes_nx_encrypt(struct aead_request *req)
{
        struct blkcipher_desc desc;
        int rc;

        desc.info = req->iv;
        desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

        rc = crypto_ccm_check_iv(desc.info);
        if (rc)
                return rc;

        return ccm_nx_encrypt(req, &desc);
}

static int ccm4309_aes_nx_decrypt(struct aead_request *req)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
        struct blkcipher_desc desc;
        u8 *iv = nx_ctx->priv.ccm.iv;

        iv[0] = 3;
        memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
        memcpy(iv + 4, req->iv, 8);

        desc.info = iv;
        desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

        return ccm_nx_decrypt(req, &desc);
}

static int ccm_aes_nx_decrypt(struct aead_request *req)
{
        struct blkcipher_desc desc;
        int rc;

        desc.info = req->iv;
        desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

        rc = crypto_ccm_check_iv(desc.info);
        if (rc)
                return rc;

        return ccm_nx_decrypt(req, &desc);
}


/* Tell the block cipher walk routines that this is a stream cipher by
 * setting cra_blocksize to 1. Even using blkcipher_walk_virt_block
 * during encrypt/decrypt doesn't solve this problem, because it calls
 * blkcipher_walk_done under the covers, which doesn't use walk->blocksize
 * but instead uses the tfm's cra_blocksize. */
struct crypto_alg nx_ccm_aes_alg = {
        .cra_name        = "ccm(aes)",
        .cra_driver_name = "ccm-aes-nx",
        .cra_priority    = 300,
        .cra_flags       = CRYPTO_ALG_TYPE_AEAD |
                           CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize   = 1,
        .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
        .cra_type        = &crypto_aead_type,
        .cra_module      = THIS_MODULE,
        .cra_init        = nx_crypto_ctx_aes_ccm_init,
        .cra_exit        = nx_crypto_ctx_exit,
        .cra_aead = {
                .ivsize      = AES_BLOCK_SIZE,
                .maxauthsize = AES_BLOCK_SIZE,
                .setkey      = ccm_aes_nx_set_key,
                .setauthsize = ccm_aes_nx_setauthsize,
                .encrypt     = ccm_aes_nx_encrypt,
                .decrypt     = ccm_aes_nx_decrypt,
        }
};

struct crypto_alg nx_ccm4309_aes_alg = {
        .cra_name        = "rfc4309(ccm(aes))",
        .cra_driver_name = "rfc4309-ccm-aes-nx",
        .cra_priority    = 300,
        .cra_flags       = CRYPTO_ALG_TYPE_AEAD |
                           CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize   = 1,
        .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
        .cra_type        = &crypto_nivaead_type,
        .cra_module      = THIS_MODULE,
        .cra_init        = nx_crypto_ctx_aes_ccm_init,
        .cra_exit        = nx_crypto_ctx_exit,
        .cra_aead = {
                .ivsize      = 8,
                .maxauthsize = AES_BLOCK_SIZE,
                .setkey      = ccm4309_aes_nx_set_key,
                .setauthsize = ccm4309_aes_nx_setauthsize,
                .encrypt     = ccm4309_aes_nx_encrypt,
                .decrypt     = ccm4309_aes_nx_decrypt,
                .geniv       = "seqiv",
        }
};