linux/drivers/crypto/caam/caamalg.c
/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |   (PDB)     |
 * ---------------      |------------->|  (hashKey)  |
 *       .              |              | (cipherKey) |
 *       .              |    |-------->| (operation) |
 * ---------------      |    |         ---------------
 * | JobDesc #2  |------|    |
 * | *(packet 2) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #3  |------------
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
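
/*
 * Illustrative sketch (not part of the driver): roughly how the job
 * descriptor layout above is produced with the desc_constr.h helpers
 * used throughout this file; see init_aead_job() and
 * init_ablkcipher_job() below for the real construction. sh_desc_dma,
 * src_dma, dst_dma and the two lengths are placeholders:
 *
 *      u32 desc[CAAM_DESC_BYTES_MAX / CAAM_CMD_SZ];
 *
 *      header + pointer to the DMA-mapped shared descriptor:
 *          init_job_desc_shared(desc, sh_desc_dma, desc_len(sh_desc),
 *                               HDR_SHARE_DEFER | HDR_REVERSE);
 *      SEQ_OUT_PTR + (output buffer) + (output length):
 *          append_seq_out_ptr(desc, dst_dma, out_len, 0);
 *      SEQ_IN_PTR + (input buffer) + (input length):
 *          append_seq_in_ptr(desc, src_dma, in_len, 0);
 */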

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY               3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE               (AES_MAX_KEY_SIZE + \
                                         SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH              16

/* length of descriptors text */
#define DESC_JOB_IO_LEN                 (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)

#define DESC_AEAD_BASE                  (4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN               (DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN               (DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN            (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE            (3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN         (DESC_ABLKCIPHER_BASE + \
                                         20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN         (DESC_ABLKCIPHER_BASE + \
                                         15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES             (DESC_AEAD_GIVENC_LEN + \
                                         CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN               (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
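
/*
 * Worked example of the sizing above, assuming CAAM_CMD_SZ is 4 bytes
 * (one u32 command word): DESC_AEAD_GIVENC_LEN is (4 + 16 + 7) * 4 =
 * 108 bytes and CAAM_MAX_KEY_SIZE is 32 + 64 * 2 = 160 bytes, so
 * DESC_MAX_USED_BYTES is 268 and DESC_MAX_USED_LEN is 67 words. That
 * exceeds the 64-word descriptor buffer, which is why the set_sh_desc
 * routines below inline the keys only when the chosen descriptor
 * variant plus keys actually fits (keys_fit_inline).
 */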

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define xstr(s) str(s)
#define str(s) #s
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
        u32 *jump_cmd, *uncond_jump_cmd;

        jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
        append_operation(desc, type | OP_ALG_AS_INITFINAL |
                         OP_ALG_DECRYPT);
        uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
        set_jump_tgt_here(desc, jump_cmd);
        append_operation(desc, type | OP_ALG_AS_INITFINAL |
                         OP_ALG_DECRYPT | OP_ALG_AAI_DK);
        set_jump_tgt_here(desc, uncond_jump_cmd);
}
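
/*
 * Flow of the commands appended above; the already-shared case takes
 * the DK (Decrypt Key) branch:
 *
 *      JUMP if SHRD ----------.
 *      OPERATION (decrypt)    |
 *      JUMP always --------.  |
 *      OPERATION (DK)  <------'
 *      <next command>  <---'
 */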

/*
 * Wait for completion of class 1 key loading before allowing
 * error propagation
 */
static inline void append_dec_shr_done(u32 *desc)
{
        u32 *jump_cmd;

        jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
        set_jump_tgt_here(desc, jump_cmd);
        append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}

/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
                             KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}

/*
 * For aead encrypt and decrypt, read iv for both classes
 */
static inline void aead_append_ld_iv(u32 *desc, int ivsize)
{
        append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
                   LDST_CLASS_1_CCB | ivsize);
        append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
}

/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
        append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
        append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
                             KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
        append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}
/*
 * Flags indicating that all data is contiguous: for src, assoc + iv +
 * payload; for dst, iv + payload
 */
#define GIV_SRC_CONTIG          1
#define GIV_DST_CONTIG          (1 << 1)

/*
 * per-session context
 */
struct caam_ctx {
        struct device *jrdev;
        u32 sh_desc_enc[DESC_MAX_USED_LEN];
        u32 sh_desc_dec[DESC_MAX_USED_LEN];
        u32 sh_desc_givenc[DESC_MAX_USED_LEN];
        dma_addr_t sh_desc_enc_dma;
        dma_addr_t sh_desc_dec_dma;
        dma_addr_t sh_desc_givenc_dma;
        u32 class1_alg_type;
        u32 class2_alg_type;
        u32 alg_op;
        u8 key[CAAM_MAX_KEY_SIZE];
        dma_addr_t key_dma;
        unsigned int enckeylen;
        unsigned int split_key_len;
        unsigned int split_key_pad_len;
        unsigned int authsize;
};

static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
                            int keys_fit_inline)
{
        if (keys_fit_inline) {
                append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
                                  ctx->split_key_len, CLASS_2 |
                                  KEY_DEST_MDHA_SPLIT | KEY_ENC);
                append_key_as_imm(desc, (void *)ctx->key +
                                  ctx->split_key_pad_len, ctx->enckeylen,
                                  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
        } else {
                append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
                           KEY_DEST_MDHA_SPLIT | KEY_ENC);
                append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
                           ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
        }
}

static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
                                  int keys_fit_inline)
{
        u32 *key_jump_cmd;

        init_sh_desc(desc, HDR_SHARE_SERIAL);

        /* Skip if already shared */
        key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
                                   JUMP_COND_SHRD);

        append_key_aead(desc, ctx, keys_fit_inline);

        set_jump_tgt_here(desc, key_jump_cmd);

        /* Propagate errors from shared to job descriptor */
        append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}
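
/*
 * Shape of the shared-descriptor prologue built above; the JUMP skips
 * the KEY commands when the descriptor is already shared and the keys
 * are therefore already loaded:
 *
 *      SHR HDR
 *      JUMP if SHRD -------------.
 *      KEY (class 2, split key)  |
 *      KEY (class 1, enc key)    |
 *      LOAD SET_OK_NO_PROP  <----'
 *      <algorithm commands follow>
 */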

static int aead_set_sh_desc(struct crypto_aead *aead)
{
        struct aead_tfm *tfm = &aead->base.crt_aead;
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
        bool keys_fit_inline = false;
        u32 *key_jump_cmd, *jump_cmd;
        u32 geniv, moveiv;
        u32 *desc;

        if (!ctx->enckeylen || !ctx->authsize)
                return 0;

        /*
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
        if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
            ctx->split_key_pad_len + ctx->enckeylen <=
            CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        /* aead_encrypt shared descriptor */
        desc = ctx->sh_desc_enc;

        init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

        /* Class 2 operation */
        append_operation(desc, ctx->class2_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

        /* cryptlen = seqoutlen - authsize */
        append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

        /* assoclen + cryptlen = seqinlen - ivsize */
        append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);

        /* assoclen = (assoclen + cryptlen) - cryptlen */
        append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);

        /* read assoc before reading payload */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
                             KEY_VLF);
        aead_append_ld_iv(desc, tfm->ivsize);

        /* Class 1 operation */
        append_operation(desc, ctx->class1_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

        /* Read and write cryptlen bytes */
        append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
        append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
        aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

        /* Write ICV */
        append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
                         LDST_SRCDST_BYTE_CONTEXT);

        ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "aead enc shdesc@"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        /*
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
        keys_fit_inline = false;
        if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
            ctx->split_key_pad_len + ctx->enckeylen <=
            CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        desc = ctx->sh_desc_dec;

        /* aead_decrypt shared descriptor */
        init_sh_desc(desc, HDR_SHARE_SERIAL);

        /* Skip if already shared */
        key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
                                   JUMP_COND_SHRD);

        append_key_aead(desc, ctx, keys_fit_inline);

        /* Only propagate error immediately if shared */
        jump_cmd = append_jump(desc, JUMP_TEST_ALL);
        set_jump_tgt_here(desc, key_jump_cmd);
        append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
        set_jump_tgt_here(desc, jump_cmd);

        /* Class 2 operation */
        append_operation(desc, ctx->class2_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

        /* assoclen + cryptlen = seqinlen - ivsize - authsize */
        append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
                                ctx->authsize + tfm->ivsize);
        /* assoclen = (assoclen + cryptlen) - cryptlen */
        append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
        append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);

        /* read assoc before reading payload */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
                             KEY_VLF);

        aead_append_ld_iv(desc, tfm->ivsize);

        append_dec_op1(desc, ctx->class1_alg_type);

        /* Read and write cryptlen bytes */
        append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
        append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
        aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

        /* Load ICV */
        append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
                             FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
        append_dec_shr_done(desc);

        ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "aead dec shdesc@"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        /*
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
        keys_fit_inline = false;
        if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
            ctx->split_key_pad_len + ctx->enckeylen <=
            CAAM_DESC_BYTES_MAX)
                keys_fit_inline = true;

        /* aead_givencrypt shared descriptor */
        desc = ctx->sh_desc_givenc;

        init_sh_desc_key_aead(desc, ctx, keys_fit_inline);

        /* Generate IV */
        geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
                NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
                NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
        append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
                            LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
        append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
        append_move(desc, MOVE_SRC_INFIFO |
                    MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
        append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

        /* Copy generated IV from class 1 context to the output FIFO */
        append_move(desc, MOVE_SRC_CLASS1CTX |
                    MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));

        /* Return to encryption */
        append_operation(desc, ctx->class2_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

        /* ivsize + cryptlen = seqoutlen - authsize */
        append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

        /* assoclen = seqinlen - (ivsize + cryptlen) */
        append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

        /* read assoc before reading payload */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
                             KEY_VLF);

        /* Copy iv from class 1 ctx to class 2 fifo */
        moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
                 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
        append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
                            LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
        append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
                            LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

        /* Class 1 operation */
        append_operation(desc, ctx->class1_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

        /* Will write ivsize + cryptlen */
        append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

        /* No need to reload iv */
        append_seq_fifo_load(desc, tfm->ivsize,
                             FIFOLD_CLASS_SKIP);

        /* Will read cryptlen */
        append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
        aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

        /* Write ICV */
        append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
                         LDST_SRCDST_BYTE_CONTEXT);

        ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
                                                 desc_bytes(desc),
                                                 DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "aead givenc shdesc@"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc,
                            unsigned int authsize)
{
        struct caam_ctx *ctx = crypto_aead_ctx(authenc);

        ctx->authsize = authsize;
        aead_set_sh_desc(authenc);

        return 0;
}

static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
                              u32 authkeylen)
{
        return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
                             ctx->split_key_pad_len, key_in, authkeylen,
                             ctx->alg_op);
}

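/*
 * Key blob layout expected by aead_setkey() below, following the
 * generic crypto_authenc convention (sketch for reference; the
 * authoritative definitions live in include/crypto/authenc.h):
 *
 *      struct rtattr (rta_type = CRYPTO_AUTHENC_KEYA_PARAM)
 *      struct crypto_authenc_key_param { __be32 enckeylen; }
 *      authentication key  (keylen - enckeylen bytes)
 *      encryption key      (enckeylen bytes)
 */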
static int aead_setkey(struct crypto_aead *aead,
                       const u8 *key, unsigned int keylen)
{
        /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
        static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
        struct rtattr *rta = (void *)key;
        struct crypto_authenc_key_param *param;
        unsigned int authkeylen;
        unsigned int enckeylen;
        int ret = 0;

        /* Validate the rtattr-wrapped authenc key parameters before use */
        if (!RTA_OK(rta, keylen) ||
            rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM ||
            RTA_PAYLOAD(rta) < sizeof(*param))
                goto badkey;

        param = RTA_DATA(rta);
        enckeylen = be32_to_cpu(param->enckeylen);

        key += RTA_ALIGN(rta->rta_len);
        keylen -= RTA_ALIGN(rta->rta_len);

        if (keylen < enckeylen)
                goto badkey;

        authkeylen = keylen - enckeylen;

        if (keylen > CAAM_MAX_KEY_SIZE)
                goto badkey;

        /* Pick class 2 key length from algorithm submask */
        ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
                                      OP_ALG_ALGSEL_SHIFT] * 2;
        ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
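
        /*
         * Worked example: for HMAC-SHA1 the MDHA pad size is 20 bytes,
         * so split_key_len = 40 (the precomputed ipad/opad halves) and
         * split_key_pad_len = ALIGN(40, 16) = 48; for HMAC-SHA256 both
         * come out to 64.
         */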

#ifdef DEBUG
        printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
               keylen, enckeylen, authkeylen);
        printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
               ctx->split_key_len, ctx->split_key_pad_len);
        print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

        ret = gen_split_aead_key(ctx, key, authkeylen);
        if (ret)
                goto badkey;

        /* append the encryption key after the padded auth split key */
        memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);

        ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
                                      enckeylen, DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->key_dma)) {
                dev_err(jrdev, "unable to map key i/o memory\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
                       ctx->split_key_pad_len + enckeylen, 1);
#endif

        ctx->enckeylen = enckeylen;

        ret = aead_set_sh_desc(aead);
        if (ret) {
                dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
                                 enckeylen, DMA_TO_DEVICE);
        }

        return ret;
badkey:
        crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
}

static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
                             const u8 *key, unsigned int keylen)
{
        struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
        struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
        struct device *jrdev = ctx->jrdev;
        int ret = 0;
        u32 *key_jump_cmd, *jump_cmd;
        u32 *desc;

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

        memcpy(ctx->key, key, keylen);
        ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
                                      DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->key_dma)) {
                dev_err(jrdev, "unable to map key i/o memory\n");
                return -ENOMEM;
        }
        ctx->enckeylen = keylen;

        /* ablkcipher_encrypt shared descriptor */
        desc = ctx->sh_desc_enc;
        init_sh_desc(desc, HDR_SHARE_SERIAL);
        /* Skip if already shared */
        key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
                                   JUMP_COND_SHRD);

        /* Load class1 key only */
        append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
                          ctx->enckeylen, CLASS_1 |
                          KEY_DEST_CLASS_REG);

        set_jump_tgt_here(desc, key_jump_cmd);

        /* Propagate errors from shared to job descriptor */
        append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);

        /* Load iv */
        append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
                   LDST_CLASS_1_CCB | tfm->ivsize);

        /* Load operation */
        append_operation(desc, ctx->class1_alg_type |
                         OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

        /* Perform operation */
        ablkcipher_append_src_dst(desc);

        ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "ablkcipher enc shdesc@"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif
        /* ablkcipher_decrypt shared descriptor */
        desc = ctx->sh_desc_dec;

        init_sh_desc(desc, HDR_SHARE_SERIAL);
        /* Skip if already shared */
        key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
                                   JUMP_COND_SHRD);

        /* Load class1 key only */
        append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
                          ctx->enckeylen, CLASS_1 |
                          KEY_DEST_CLASS_REG);

        /* Only propagate error immediately if shared */
        jump_cmd = append_jump(desc, JUMP_TEST_ALL);
        set_jump_tgt_here(desc, key_jump_cmd);
        append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
        set_jump_tgt_here(desc, jump_cmd);

        /* load IV */
        append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
                   LDST_CLASS_1_CCB | tfm->ivsize);

        /* Choose operation */
        append_dec_op1(desc, ctx->class1_alg_type);

        /* Perform operation */
        ablkcipher_append_src_dst(desc);

        /* Wait for key to load before allowing error propagation */
        append_dec_shr_done(desc);

        ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "ablkcipher dec shdesc@"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        return ret;
}

/*
 * aead_edesc - s/w-extended aead descriptor
 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @assoc_chained: if associated data is chained
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 *           (variable length; must not exceed MAX_CAAM_DESCSIZE)
 */
struct aead_edesc {
        int assoc_nents;
        bool assoc_chained;
        int src_nents;
        bool src_chained;
        int dst_nents;
        bool dst_chained;
        dma_addr_t iv_dma;
        int sec4_sg_bytes;
        dma_addr_t sec4_sg_dma;
        struct sec4_sg_entry *sec4_sg;
        u32 hw_desc[0];
};
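
/*
 * An aead_edesc is allocated as a single contiguous kmalloc block (see
 * aead_edesc_alloc() below):
 *
 *      [ struct aead_edesc | h/w job descriptor | sec4_sg link table ]
 *
 * so hw_desc[] runs directly into the link table that sec4_sg points to.
 */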

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @src_chained: if source is chained
 * @dst_nents: number of segments in output scatterlist
 * @dst_chained: if destination is chained
 * @iv_dma: dma address of iv for checking continuity and link table
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 *           (variable length; must not exceed MAX_CAAM_DESCSIZE)
 */
struct ablkcipher_edesc {
        int src_nents;
        bool src_chained;
        int dst_nents;
        bool dst_chained;
        dma_addr_t iv_dma;
        int sec4_sg_bytes;
        dma_addr_t sec4_sg_dma;
        struct sec4_sg_entry *sec4_sg;
        u32 hw_desc[0];
};

static void caam_unmap(struct device *dev, struct scatterlist *src,
                       struct scatterlist *dst, int src_nents,
                       bool src_chained, int dst_nents, bool dst_chained,
                       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
                       int sec4_sg_bytes)
{
        if (dst != src) {
                dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
                                     src_chained);
                dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
                                     dst_chained);
        } else {
                dma_unmap_sg_chained(dev, src, src_nents ? : 1,
                                     DMA_BIDIRECTIONAL, src_chained);
        }

        if (iv_dma)
                dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
        if (sec4_sg_bytes)
                dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
                                 DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
                       struct aead_edesc *edesc,
                       struct aead_request *req)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        int ivsize = crypto_aead_ivsize(aead);

        dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
                             DMA_TO_DEVICE, edesc->assoc_chained);

        caam_unmap(dev, req->src, req->dst,
                   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
                   edesc->dst_chained, edesc->iv_dma, ivsize,
                   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void ablkcipher_unmap(struct device *dev,
                             struct ablkcipher_edesc *edesc,
                             struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
        int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

        caam_unmap(dev, req->src, req->dst,
                   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
                   edesc->dst_chained, edesc->iv_dma, ivsize,
                   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
                              void *context)
{
        struct aead_request *req = context;
        struct aead_edesc *edesc;
#ifdef DEBUG
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        int ivsize = crypto_aead_ivsize(aead);

        dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

        edesc = (struct aead_edesc *)((char *)desc -
                 offsetof(struct aead_edesc, hw_desc));

        if (err) {
                char tmp[CAAM_ERROR_STR_MAX];

                dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
        }

        aead_unmap(jrdev, edesc, req);

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "assoc  @"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
                       req->assoclen, 1);
        print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
                       edesc->src_nents ? 100 : ivsize, 1);
        print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
                       edesc->src_nents ? 100 : req->cryptlen +
                       ctx->authsize + 4, 1);
#endif

        kfree(edesc);

        aead_request_complete(req, err);
}

static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
                              void *context)
{
        struct aead_request *req = context;
        struct aead_edesc *edesc;
#ifdef DEBUG
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        int ivsize = crypto_aead_ivsize(aead);

        dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

        edesc = (struct aead_edesc *)((char *)desc -
                 offsetof(struct aead_edesc, hw_desc));

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
                       ivsize, 1);
        print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
                       req->cryptlen, 1);
#endif

        if (err) {
                char tmp[CAAM_ERROR_STR_MAX];

                dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
        }

        aead_unmap(jrdev, edesc, req);

        /*
         * verify hw auth check passed else return -EBADMSG
         */
        if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
                err = -EBADMSG;

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4,
                       ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
                       sizeof(struct iphdr) + req->assoclen +
                       ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
                       ctx->authsize + 36, 1);
        if (!err && edesc->sec4_sg_bytes) {
                struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
                print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ",
                               DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
                               sg->length + ctx->authsize + 16, 1);
        }
#endif

        kfree(edesc);

        aead_request_complete(req, err);
}

static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
                                    void *context)
{
        struct ablkcipher_request *req = context;
        struct ablkcipher_edesc *edesc;
#ifdef DEBUG
        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
        int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

        dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

        edesc = (struct ablkcipher_edesc *)((char *)desc -
                 offsetof(struct ablkcipher_edesc, hw_desc));

        if (err) {
                char tmp[CAAM_ERROR_STR_MAX];

                dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
        }

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
                       edesc->src_nents > 1 ? 100 : ivsize, 1);
        print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
                       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

        ablkcipher_unmap(jrdev, edesc, req);
        kfree(edesc);

        ablkcipher_request_complete(req, err);
}

static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
                                    void *context)
{
        struct ablkcipher_request *req = context;
        struct ablkcipher_edesc *edesc;
#ifdef DEBUG
        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
        int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

        dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

        edesc = (struct ablkcipher_edesc *)((char *)desc -
                 offsetof(struct ablkcipher_edesc, hw_desc));
        if (err) {
                char tmp[CAAM_ERROR_STR_MAX];

                dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
        }

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "dstiv  @"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
                       ivsize, 1);
        print_hex_dump(KERN_ERR, "dst    @"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
                       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
#endif

        ablkcipher_unmap(jrdev, edesc, req);
        kfree(edesc);

        ablkcipher_request_complete(req, err);
}

/*
 * Fill in aead job descriptor
 */
static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
                          struct aead_edesc *edesc,
                          struct aead_request *req,
                          bool all_contig, bool encrypt)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        int ivsize = crypto_aead_ivsize(aead);
        int authsize = ctx->authsize;
        u32 *desc = edesc->hw_desc;
        u32 out_options = 0, in_options;
        dma_addr_t dst_dma, src_dma;
        int len, sec4_sg_index = 0;

#ifdef DEBUG
        debug("assoclen %d cryptlen %d authsize %d\n",
              req->assoclen, req->cryptlen, authsize);
        print_hex_dump(KERN_ERR, "assoc  @"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
                       req->assoclen, 1);
        print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
                       edesc->src_nents ? 100 : ivsize, 1);
        print_hex_dump(KERN_ERR, "src    @"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
                       edesc->src_nents ? 100 : req->cryptlen, 1);
        print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
                       desc_bytes(sh_desc), 1);
#endif

        len = desc_len(sh_desc);
        init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

        if (all_contig) {
                src_dma = sg_dma_address(req->assoc);
                in_options = 0;
        } else {
                src_dma = edesc->sec4_sg_dma;
                sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
                                 (edesc->src_nents ? : 1);
                in_options = LDST_SGF;
        }
        if (encrypt)
                append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
                                  req->cryptlen - authsize, in_options);
        else
                append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
                                  req->cryptlen, in_options);

        if (likely(req->src == req->dst)) {
                if (all_contig) {
                        dst_dma = sg_dma_address(req->src);
                } else {
                        dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
                                  ((edesc->assoc_nents ? : 1) + 1);
                        out_options = LDST_SGF;
                }
        } else {
                if (!edesc->dst_nents) {
                        dst_dma = sg_dma_address(req->dst);
                } else {
                        dst_dma = edesc->sec4_sg_dma +
                                  sec4_sg_index *
                                  sizeof(struct sec4_sg_entry);
                        out_options = LDST_SGF;
                }
        }
        if (encrypt)
                append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
        else
                append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
                                   out_options);
}
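
/*
 * Worked example of the sequence lengths above (illustrative numbers
 * only): for AES-CBC with HMAC-SHA1 (ivsize 16, authsize 20), assoclen
 * 24 and 1000 bytes of plaintext, aead_encrypt() below first bumps
 * req->cryptlen to 1020, so the encrypt job reads 24 + 16 + 1020 - 20 =
 * 1040 input bytes and writes 1020 output bytes (ciphertext plus ICV).
 */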

/*
 * Fill in aead givencrypt job descriptor
 */
static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
                              struct aead_edesc *edesc,
                              struct aead_request *req,
                              int contig)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        int ivsize = crypto_aead_ivsize(aead);
        int authsize = ctx->authsize;
        u32 *desc = edesc->hw_desc;
        u32 out_options = 0, in_options;
        dma_addr_t dst_dma, src_dma;
        int len, sec4_sg_index = 0;

#ifdef DEBUG
        debug("assoclen %d cryptlen %d authsize %d\n",
              req->assoclen, req->cryptlen, authsize);
        print_hex_dump(KERN_ERR, "assoc  @"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
                       req->assoclen, 1);
        print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
        print_hex_dump(KERN_ERR, "src    @"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
                       edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
        print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
                       desc_bytes(sh_desc), 1);
#endif

        len = desc_len(sh_desc);
        init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

        if (contig & GIV_SRC_CONTIG) {
                src_dma = sg_dma_address(req->assoc);
                in_options = 0;
        } else {
                src_dma = edesc->sec4_sg_dma;
                sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
                in_options = LDST_SGF;
        }
        append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
                          req->cryptlen - authsize, in_options);

        if (contig & GIV_DST_CONTIG) {
                dst_dma = edesc->iv_dma;
        } else {
                if (likely(req->src == req->dst)) {
                        dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
                                  edesc->assoc_nents;
                        out_options = LDST_SGF;
                } else {
                        dst_dma = edesc->sec4_sg_dma +
                                  sec4_sg_index *
                                  sizeof(struct sec4_sg_entry);
                        out_options = LDST_SGF;
                }
        }

        append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options);
}

/*
 * Fill in ablkcipher job descriptor
 */
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
                                struct ablkcipher_edesc *edesc,
                                struct ablkcipher_request *req,
                                bool iv_contig)
{
        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
        int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
        u32 *desc = edesc->hw_desc;
        u32 out_options = 0, in_options;
        dma_addr_t dst_dma, src_dma;
        int len, sec4_sg_index = 0;

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
                       ivsize, 1);
        print_hex_dump(KERN_ERR, "src    @"xstr(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
                       edesc->src_nents ? 100 : req->nbytes, 1);
#endif

        len = desc_len(sh_desc);
        init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

        if (iv_contig) {
                src_dma = edesc->iv_dma;
                in_options = 0;
        } else {
                src_dma = edesc->sec4_sg_dma;
                sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
                in_options = LDST_SGF;
        }
        append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);

        if (likely(req->src == req->dst)) {
                if (!edesc->src_nents && iv_contig) {
                        dst_dma = sg_dma_address(req->src);
                } else {
                        dst_dma = edesc->sec4_sg_dma +
                                sizeof(struct sec4_sg_entry);
                        out_options = LDST_SGF;
                }
        } else {
                if (!edesc->dst_nents) {
                        dst_dma = sg_dma_address(req->dst);
                } else {
                        dst_dma = edesc->sec4_sg_dma +
                                sec4_sg_index * sizeof(struct sec4_sg_entry);
                        out_options = LDST_SGF;
                }
        }
        append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}
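
/*
 * Note on the ablkcipher job above: the input sequence always covers
 * iv + payload (req->nbytes + ivsize) while the output covers only the
 * payload, matching the shared descriptors built in ablkcipher_setkey()
 * which consume the IV with a SEQ LOAD before the FIFO transfer.
 */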

/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
                                           int desc_bytes, bool *all_contig_ptr)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
        gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
                       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
        int assoc_nents, src_nents, dst_nents = 0;
        struct aead_edesc *edesc;
        dma_addr_t iv_dma = 0;
        int sgc;
        bool all_contig = true;
        bool assoc_chained = false, src_chained = false, dst_chained = false;
        int ivsize = crypto_aead_ivsize(aead);
        int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;

        assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
        src_nents = sg_count(req->src, req->cryptlen, &src_chained);

        if (unlikely(req->dst != req->src))
                dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);

        sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
                                 DMA_BIDIRECTIONAL, assoc_chained);
        if (likely(req->src == req->dst)) {
                sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
                                         DMA_BIDIRECTIONAL, src_chained);
        } else {
                sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
                                         DMA_TO_DEVICE, src_chained);
                sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
                                         DMA_FROM_DEVICE, dst_chained);
        }

        /* Check if data are contiguous */
        iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
        if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
            iv_dma || src_nents || iv_dma + ivsize !=
            sg_dma_address(req->src)) {
                all_contig = false;
                assoc_nents = assoc_nents ? : 1;
                src_nents = src_nents ? : 1;
                sec4_sg_len = assoc_nents + 1 + src_nents;
        }
        sec4_sg_len += dst_nents;

        sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

        /* allocate space for base edesc and hw desc commands, link tables */
        edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
                        sec4_sg_bytes, GFP_DMA | flags);
        if (!edesc) {
                dev_err(jrdev, "could not allocate extended descriptor\n");
                return ERR_PTR(-ENOMEM);
        }

        edesc->assoc_nents = assoc_nents;
        edesc->assoc_chained = assoc_chained;
        edesc->src_nents = src_nents;
        edesc->src_chained = src_chained;
        edesc->dst_nents = dst_nents;
        edesc->dst_chained = dst_chained;
        edesc->iv_dma = iv_dma;
        edesc->sec4_sg_bytes = sec4_sg_bytes;
        edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
                         desc_bytes;
        *all_contig_ptr = all_contig;

        sec4_sg_index = 0;
        if (!all_contig) {
                sg_to_sec4_sg(req->assoc,
                              (assoc_nents ? : 1),
                              edesc->sec4_sg +
                              sec4_sg_index, 0);
                sec4_sg_index += assoc_nents ? : 1;
                dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
                                   iv_dma, ivsize, 0);
                sec4_sg_index += 1;
                sg_to_sec4_sg_last(req->src,
                                   (src_nents ? : 1),
                                   edesc->sec4_sg +
                                   sec4_sg_index, 0);
                sec4_sg_index += src_nents ? : 1;
        }
        if (dst_nents) {
                sg_to_sec4_sg_last(req->dst, dst_nents,
                                   edesc->sec4_sg + sec4_sg_index, 0);
        }

        /*
         * Map the link table only after it has been filled in, so the
         * CPU writes above are visible to the device
         */
        edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                            sec4_sg_bytes, DMA_TO_DEVICE);

        return edesc;
}
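
/*
 * Resulting sec4_sg link table layout in the non-contiguous case, as
 * consumed by init_aead_job():
 *
 *      [ assoc segment(s) | iv | src segment(s) | dst segment(s) ]
 *
 * where the src and dst runs are terminated with a LAST entry and the
 * dst run is present only when req->dst != req->src.
 */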
1227
1228static int aead_encrypt(struct aead_request *req)
1229{
1230        struct aead_edesc *edesc;
1231        struct crypto_aead *aead = crypto_aead_reqtfm(req);
1232        struct caam_ctx *ctx = crypto_aead_ctx(aead);
1233        struct device *jrdev = ctx->jrdev;
1234        bool all_contig;
1235        u32 *desc;
1236        int ret = 0;
1237
1238        req->cryptlen += ctx->authsize;
1239
1240        /* allocate extended descriptor */
1241        edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
1242                                 CAAM_CMD_SZ, &all_contig);
1243        if (IS_ERR(edesc))
1244                return PTR_ERR(edesc);
1245
1246        /* Create and submit job descriptor */
1247        init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
1248                      all_contig, true);
1249#ifdef DEBUG
1250        print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
1251                       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1252                       desc_bytes(edesc->hw_desc), 1);
1253#endif
1254
1255        desc = edesc->hw_desc;
1256        ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
1257        if (!ret) {
1258                ret = -EINPROGRESS;
1259        } else {
1260                aead_unmap(jrdev, edesc, req);
1261                kfree(edesc);
1262        }
1263
1264        return ret;
1265}
1266
1267static int aead_decrypt(struct aead_request *req)
1268{
1269        struct aead_edesc *edesc;
1270        struct crypto_aead *aead = crypto_aead_reqtfm(req);
1271        struct caam_ctx *ctx = crypto_aead_ctx(aead);
1272        struct device *jrdev = ctx->jrdev;
1273        bool all_contig;
1274        u32 *desc;
1275        int ret = 0;
1276
1277        /* allocate extended descriptor */
1278        edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
1279                                 CAAM_CMD_SZ, &all_contig);
1280        if (IS_ERR(edesc))
1281                return PTR_ERR(edesc);
1282
1283#ifdef DEBUG
1284        print_hex_dump(KERN_ERR, "dec src@"xstr(__LINE__)": ",
1285                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1286                       req->cryptlen, 1);
1287#endif
1288
1289        /* Create and submit job descriptor*/
1290        init_aead_job(ctx->sh_desc_dec,
1291                      ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
1292#ifdef DEBUG
1293        print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
1294                       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1295                       desc_bytes(edesc->hw_desc), 1);
1296#endif
1297
1298        desc = edesc->hw_desc;
1299        ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
1300        if (!ret) {
1301                ret = -EINPROGRESS;
1302        } else {
1303                aead_unmap(jrdev, edesc, req);
1304                kfree(edesc);
1305        }
1306
1307        return ret;
1308}
1309
1310/*
1311 * allocate and map the aead extended descriptor for aead givencrypt
1312 */
1313static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
1314                                               *greq, int desc_bytes,
1315                                               u32 *contig_ptr)
1316{
1317        struct aead_request *req = &greq->areq;
1318        struct crypto_aead *aead = crypto_aead_reqtfm(req);
1319        struct caam_ctx *ctx = crypto_aead_ctx(aead);
1320        struct device *jrdev = ctx->jrdev;
1321        gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1322                       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1323        int assoc_nents, src_nents, dst_nents = 0;
1324        struct aead_edesc *edesc;
1325        dma_addr_t iv_dma = 0;
1326        int sgc;
1327        u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
1328        int ivsize = crypto_aead_ivsize(aead);
1329        bool assoc_chained = false, src_chained = false, dst_chained = false;
1330        int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
1331
1332        assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
1333        src_nents = sg_count(req->src, req->cryptlen, &src_chained);
1334
1335        if (unlikely(req->dst != req->src))
1336                dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
1337
1338        sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
1339                                 DMA_BIDIRECTIONAL, assoc_chained);
1340        if (likely(req->src == req->dst)) {
1341                sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1342                                         DMA_BIDIRECTIONAL, src_chained);
1343        } else {
1344                sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1345                                         DMA_TO_DEVICE, src_chained);
1346                sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
1347                                         DMA_FROM_DEVICE, dst_chained);
1348        }
1349
1350        /* Check if data are contiguous */
1351        iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
1352        if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
1353            iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
1354                contig &= ~GIV_SRC_CONTIG;
1355        if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
1356                contig &= ~GIV_DST_CONTIG;
1357        if (unlikely(req->src != req->dst)) {
1358                dst_nents = dst_nents ? : 1;
1359                sec4_sg_len += 1;
1360        }
1361        if (!(contig & GIV_SRC_CONTIG)) {
1362                assoc_nents = assoc_nents ? : 1;
1363                src_nents = src_nents ? : 1;
1364                sec4_sg_len += assoc_nents + 1 + src_nents;
1365                if (likely(req->src == req->dst))
1366                        contig &= ~GIV_DST_CONTIG;
1367        }
1368        sec4_sg_len += dst_nents;
1369
1370        sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
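
        /*
         * Sketch of the link table just sized above when the source
         * side is not contiguous (one sec4_sg_entry per line):
         *
         *	[ assoc segment(s)           ]
         *	[ IV                         ]
         *	[ src segment(s), last FINAL ]
         *	[ IV                         ]  only when req->dst !=
         *	[ dst segment(s), last FINAL ]  req->src and dst is not
         *	                                contiguous with the IV
         */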
1371
1372        /* allocate space for base edesc and hw desc commands, link tables */
1373        edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
1374                        sec4_sg_bytes, GFP_DMA | flags);
1375        if (!edesc) {
1376                dev_err(jrdev, "could not allocate extended descriptor\n");
1377                return ERR_PTR(-ENOMEM);
1378        }
1379
1380        edesc->assoc_nents = assoc_nents;
1381        edesc->assoc_chained = assoc_chained;
1382        edesc->src_nents = src_nents;
1383        edesc->src_chained = src_chained;
1384        edesc->dst_nents = dst_nents;
1385        edesc->dst_chained = dst_chained;
1386        edesc->iv_dma = iv_dma;
1387        edesc->sec4_sg_bytes = sec4_sg_bytes;
1388        edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
1389                         desc_bytes;
1392        *contig_ptr = contig;
1393
1394        sec4_sg_index = 0;
1395        if (!(contig & GIV_SRC_CONTIG)) {
1396                sg_to_sec4_sg(req->assoc, assoc_nents,
1397                              edesc->sec4_sg +
1398                              sec4_sg_index, 0);
1399                sec4_sg_index += assoc_nents;
1400                dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1401                                   iv_dma, ivsize, 0);
1402                sec4_sg_index += 1;
1403                sg_to_sec4_sg_last(req->src, src_nents,
1404                                   edesc->sec4_sg +
1405                                   sec4_sg_index, 0);
1406                sec4_sg_index += src_nents;
1407        }
1408        if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
1409                dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1410                                   iv_dma, ivsize, 0);
1411                sec4_sg_index += 1;
1412                sg_to_sec4_sg_last(req->dst, dst_nents,
1413                                   edesc->sec4_sg + sec4_sg_index, 0);
1414        }
1415
        /* map the link table only after it has been populated */
        edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                            sec4_sg_bytes, DMA_TO_DEVICE);

1416        return edesc;
1417}
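
/*
 * Layout of one extended-descriptor allocation as built above (a single
 * GFP_DMA kmalloc; hw_desc is the trailing member of struct aead_edesc):
 *
 *	+---------------------+  <- edesc
 *	| struct aead_edesc   |
 *	+---------------------+  <- edesc->hw_desc
 *	| job descriptor      |  (desc_bytes)
 *	+---------------------+  <- edesc->sec4_sg
 *	| sec4 link table     |  (sec4_sg_bytes)
 *	+---------------------+
 */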
1418
1419static int aead_givencrypt(struct aead_givcrypt_request *areq)
1420{
1421        struct aead_request *req = &areq->areq;
1422        struct aead_edesc *edesc;
1423        struct crypto_aead *aead = crypto_aead_reqtfm(req);
1424        struct caam_ctx *ctx = crypto_aead_ctx(aead);
1425        struct device *jrdev = ctx->jrdev;
1426        u32 contig;
1427        u32 *desc;
1428        int ret = 0;
1429
1430        req->cryptlen += ctx->authsize;
1431
1432        /* allocate extended descriptor */
1433        edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
1434                                     CAAM_CMD_SZ, &contig);
1435
1436        if (IS_ERR(edesc))
1437                return PTR_ERR(edesc);
1438
1439#ifdef DEBUG
1440        print_hex_dump(KERN_ERR, "giv src@"xstr(__LINE__)": ",
1441                       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1442                       req->cryptlen, 1);
1443#endif
1444
1445        /* Create and submit job descriptor */
1446        init_aead_giv_job(ctx->sh_desc_givenc,
1447                          ctx->sh_desc_givenc_dma, edesc, req, contig);
1448#ifdef DEBUG
1449        print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
1450                       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1451                       desc_bytes(edesc->hw_desc), 1);
1452#endif
1453
1454        desc = edesc->hw_desc;
1455        ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
1456        if (!ret) {
1457                ret = -EINPROGRESS;
1458        } else {
1459                aead_unmap(jrdev, edesc, req);
1460                kfree(edesc);
1461        }
1462
1463        return ret;
1464}
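
/*
 * Usage sketch for the givencrypt path (illustrative; my_* names are
 * hypothetical).  The IV generator hands the generated IV back through
 * greq->giv; req->cryptlen is grown by authsize above so the mapped
 * buffers also cover the ICV appended on encrypt:
 *
 *	struct aead_givcrypt_request *greq;
 *
 *	greq = aead_givcrypt_alloc(tfm, GFP_KERNEL);
 *	aead_givcrypt_set_callback(greq, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done, &my_completion);
 *	aead_givcrypt_set_assoc(greq, my_assoc_sg, my_assoclen);
 *	aead_givcrypt_set_crypt(greq, my_src_sg, my_dst_sg, my_cryptlen,
 *				my_iv);
 *	aead_givcrypt_set_giv(greq, my_giv, my_seqno);
 *	err = crypto_aead_givencrypt(greq);
 */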
1465
1466/*
1467 * allocate and map the ablkcipher extended descriptor
1468 */
1469static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
1470                                                       *req, int desc_bytes,
1471                                                       bool *iv_contig_out)
1472{
1473        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1474        struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1475        struct device *jrdev = ctx->jrdev;
1476        gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1477                                          CRYPTO_TFM_REQ_MAY_SLEEP)) ?
1478                       GFP_KERNEL : GFP_ATOMIC;
1479        int src_nents, dst_nents = 0, sec4_sg_bytes;
1480        struct ablkcipher_edesc *edesc;
1481        dma_addr_t iv_dma = 0;
1482        bool iv_contig = false;
1483        int sgc;
1484        int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1485        bool src_chained = false, dst_chained = false;
1486        int sec4_sg_index;
1487
1488        src_nents = sg_count(req->src, req->nbytes, &src_chained);
1489
1490        if (req->dst != req->src)
1491                dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
1492
1493        if (likely(req->src == req->dst)) {
1494                sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1495                                         DMA_BIDIRECTIONAL, src_chained);
1496        } else {
1497                sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1498                                         DMA_TO_DEVICE, src_chained);
1499                sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
1500                                         DMA_FROM_DEVICE, dst_chained);
1501        }
1502
1503        /*
1504         * Check if the IV can be contiguous with the source: if the
1505         * mapped IV immediately precedes a single-segment src, the
1506         * hardware consumes iv+src directly; otherwise build a link
         * table below.
         */
1507        iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
1508        if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
1509                iv_contig = true;
1510        else
1511                src_nents = src_nents ? : 1;
1512        sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
1513                        sizeof(struct sec4_sg_entry);
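
        /*
         * Worked example (illustrative), assuming sg_count() returns 0
         * for a single unchained segment, as the direct-mapping fast
         * paths above rely on: a four-segment source mapped separately
         * from a two-segment destination, with a non-contiguous IV,
         * needs (1 + 4 + 2) * sizeof(struct sec4_sg_entry) bytes.
         */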
1514
1515        /* allocate space for base edesc and hw desc commands, link tables */
1516        edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
1517                        sec4_sg_bytes, GFP_DMA | flags);
1518        if (!edesc) {
1519                dev_err(jrdev, "could not allocate extended descriptor\n");
1520                return ERR_PTR(-ENOMEM);
1521        }
1522
1523        edesc->src_nents = src_nents;
1524        edesc->src_chained = src_chained;
1525        edesc->dst_nents = dst_nents;
1526        edesc->dst_chained = dst_chained;
1527        edesc->sec4_sg_bytes = sec4_sg_bytes;
1528        edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
1529                         desc_bytes;
1530
1531        sec4_sg_index = 0;
1532        if (!iv_contig) {
1533                dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
1534                sg_to_sec4_sg_last(req->src, src_nents,
1535                                   edesc->sec4_sg + 1, 0);
1536                sec4_sg_index += 1 + src_nents;
1537        }
1538
1539        if (dst_nents) {
1540                sg_to_sec4_sg_last(req->dst, dst_nents,
1541                        edesc->sec4_sg + sec4_sg_index, 0);
1542        }
1543
1544        edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1545                                            sec4_sg_bytes, DMA_TO_DEVICE);
1546        edesc->iv_dma = iv_dma;
1547
1548#ifdef DEBUG
1549        print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"xstr(__LINE__)": ",
1550                       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
1551                       sec4_sg_bytes, 1);
1552#endif
1553
1554        *iv_contig_out = iv_contig;
1555        return edesc;
1556}
1557
1558static int ablkcipher_encrypt(struct ablkcipher_request *req)
1559{
1560        struct ablkcipher_edesc *edesc;
1561        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1562        struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1563        struct device *jrdev = ctx->jrdev;
1564        bool iv_contig;
1565        u32 *desc;
1566        int ret = 0;
1567
1568        /* allocate extended descriptor */
1569        edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
1570                                       CAAM_CMD_SZ, &iv_contig);
1571        if (IS_ERR(edesc))
1572                return PTR_ERR(edesc);
1573
1574        /* Create and submit job descriptor */
1575        init_ablkcipher_job(ctx->sh_desc_enc,
1576                ctx->sh_desc_enc_dma, edesc, req, iv_contig);
1577#ifdef DEBUG
1578        print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
1579                       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1580                       desc_bytes(edesc->hw_desc), 1);
1581#endif
1582        desc = edesc->hw_desc;
1583        ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
1584
1585        if (!ret) {
1586                ret = -EINPROGRESS;
1587        } else {
1588                ablkcipher_unmap(jrdev, edesc, req);
1589                kfree(edesc);
1590        }
1591
1592        return ret;
1593}
1594
1595static int ablkcipher_decrypt(struct ablkcipher_request *req)
1596{
1597        struct ablkcipher_edesc *edesc;
1598        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1599        struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1600        struct device *jrdev = ctx->jrdev;
1601        bool iv_contig;
1602        u32 *desc;
1603        int ret = 0;
1604
1605        /* allocate extended descriptor */
1606        edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
1607                                       CAAM_CMD_SZ, &iv_contig);
1608        if (IS_ERR(edesc))
1609                return PTR_ERR(edesc);
1610
1611        /* Create and submit job descriptor */
1612        init_ablkcipher_job(ctx->sh_desc_dec,
1613                ctx->sh_desc_dec_dma, edesc, req, iv_contig);
1614        desc = edesc->hw_desc;
1615#ifdef DEBUG
1616        print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
1617                       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1618                       desc_bytes(edesc->hw_desc), 1);
1619#endif
1620
1621        ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
1622        if (!ret) {
1623                ret = -EINPROGRESS;
1624        } else {
1625                ablkcipher_unmap(jrdev, edesc, req);
1626                kfree(edesc);
1627        }
1628
1629        return ret;
1630}
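
/*
 * Usage sketch (illustrative; my_* names hypothetical) for the two
 * entry points above via the asynchronous block cipher API:
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	crypto_ablkcipher_setkey(tfm, my_key, AES_KEYSIZE_128);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *					my_done, &my_completion);
 *	ablkcipher_request_set_crypt(req, my_src_sg, my_dst_sg,
 *				     my_nbytes, my_iv);
 *	err = crypto_ablkcipher_encrypt(req);
 *	if (err == -EINPROGRESS || err == -EBUSY)
 *		wait_for_completion(&my_completion);
 */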
1631
1632#define template_aead           template_u.aead
1633#define template_ablkcipher     template_u.ablkcipher
1634struct caam_alg_template {
1635        char name[CRYPTO_MAX_ALG_NAME];
1636        char driver_name[CRYPTO_MAX_ALG_NAME];
1637        unsigned int blocksize;
1638        u32 type;
1639        union {
1640                struct ablkcipher_alg ablkcipher;
1641                struct aead_alg aead;
1642                struct blkcipher_alg blkcipher;
1643                struct cipher_alg cipher;
1644                struct compress_alg compress;
1645                struct rng_alg rng;
1646        } template_u;
1647        u32 class1_alg_type;
1648        u32 class2_alg_type;
1649        u32 alg_op;
1650};
1651
1652static struct caam_alg_template driver_algs[] = {
1653        /* single-pass ipsec_esp descriptor */
1654        {
1655                .name = "authenc(hmac(md5),cbc(aes))",
1656                .driver_name = "authenc-hmac-md5-cbc-aes-caam",
1657                .blocksize = AES_BLOCK_SIZE,
1658                .type = CRYPTO_ALG_TYPE_AEAD,
1659                .template_aead = {
1660                        .setkey = aead_setkey,
1661                        .setauthsize = aead_setauthsize,
1662                        .encrypt = aead_encrypt,
1663                        .decrypt = aead_decrypt,
1664                        .givencrypt = aead_givencrypt,
1665                        .geniv = "<built-in>",
1666                        .ivsize = AES_BLOCK_SIZE,
1667                        .maxauthsize = MD5_DIGEST_SIZE,
1668                        },
1669                .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1670                .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
1671                .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1672        },
1673        {
1674                .name = "authenc(hmac(sha1),cbc(aes))",
1675                .driver_name = "authenc-hmac-sha1-cbc-aes-caam",
1676                .blocksize = AES_BLOCK_SIZE,
1677                .type = CRYPTO_ALG_TYPE_AEAD,
1678                .template_aead = {
1679                        .setkey = aead_setkey,
1680                        .setauthsize = aead_setauthsize,
1681                        .encrypt = aead_encrypt,
1682                        .decrypt = aead_decrypt,
1683                        .givencrypt = aead_givencrypt,
1684                        .geniv = "<built-in>",
1685                        .ivsize = AES_BLOCK_SIZE,
1686                        .maxauthsize = SHA1_DIGEST_SIZE,
1687                        },
1688                .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1689                .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1690                .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1691        },
1692        {
1693                .name = "authenc(hmac(sha224),cbc(aes))",
1694                .driver_name = "authenc-hmac-sha224-cbc-aes-caam",
1695                .blocksize = AES_BLOCK_SIZE,
                .type = CRYPTO_ALG_TYPE_AEAD,
1696                .template_aead = {
1697                        .setkey = aead_setkey,
1698                        .setauthsize = aead_setauthsize,
1699                        .encrypt = aead_encrypt,
1700                        .decrypt = aead_decrypt,
1701                        .givencrypt = aead_givencrypt,
1702                        .geniv = "<built-in>",
1703                        .ivsize = AES_BLOCK_SIZE,
1704                        .maxauthsize = SHA224_DIGEST_SIZE,
1705                        },
1706                .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1707                .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1708                                   OP_ALG_AAI_HMAC_PRECOMP,
1709                .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1710        },
1711        {
1712                .name = "authenc(hmac(sha256),cbc(aes))",
1713                .driver_name = "authenc-hmac-sha256-cbc-aes-caam",
1714                .blocksize = AES_BLOCK_SIZE,
1715                .type = CRYPTO_ALG_TYPE_AEAD,
1716                .template_aead = {
1717                        .setkey = aead_setkey,
1718                        .setauthsize = aead_setauthsize,
1719                        .encrypt = aead_encrypt,
1720                        .decrypt = aead_decrypt,
1721                        .givencrypt = aead_givencrypt,
1722                        .geniv = "<built-in>",
1723                        .ivsize = AES_BLOCK_SIZE,
1724                        .maxauthsize = SHA256_DIGEST_SIZE,
1725                        },
1726                .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1727                .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1728                                   OP_ALG_AAI_HMAC_PRECOMP,
1729                .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1730        },
1731        {
1732                .name = "authenc(hmac(sha384),cbc(aes))",
1733                .driver_name = "authenc-hmac-sha384-cbc-aes-caam",
1734                .blocksize = AES_BLOCK_SIZE,
                .type = CRYPTO_ALG_TYPE_AEAD,
1735                .template_aead = {
1736                        .setkey = aead_setkey,
1737                        .setauthsize = aead_setauthsize,
1738                        .encrypt = aead_encrypt,
1739                        .decrypt = aead_decrypt,
1740                        .givencrypt = aead_givencrypt,
1741                        .geniv = "<built-in>",
1742                        .ivsize = AES_BLOCK_SIZE,
1743                        .maxauthsize = SHA384_DIGEST_SIZE,
1744                        },
1745                .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1746                .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1747                                   OP_ALG_AAI_HMAC_PRECOMP,
1748                .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1749        },
1750
1751        {
1752                .name = "authenc(hmac(sha512),cbc(aes))",
1753                .driver_name = "authenc-hmac-sha512-cbc-aes-caam",
1754                .blocksize = AES_BLOCK_SIZE,
1755                .type = CRYPTO_ALG_TYPE_AEAD,
1756                .template_aead = {
1757                        .setkey = aead_setkey,
1758                        .setauthsize = aead_setauthsize,
1759                        .encrypt = aead_encrypt,
1760                        .decrypt = aead_decrypt,
1761                        .givencrypt = aead_givencrypt,
1762                        .geniv = "<built-in>",
1763                        .ivsize = AES_BLOCK_SIZE,
1764                        .maxauthsize = SHA512_DIGEST_SIZE,
1765                        },
1766                .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1767                .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1768                                   OP_ALG_AAI_HMAC_PRECOMP,
1769                .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1770        },
1771        {
1772                .name = "authenc(hmac(md5),cbc(des3_ede))",
1773                .driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
1774                .blocksize = DES3_EDE_BLOCK_SIZE,
1775                .type = CRYPTO_ALG_TYPE_AEAD,
1776                .template_aead = {
1777                        .setkey = aead_setkey,
1778                        .setauthsize = aead_setauthsize,
1779                        .encrypt = aead_encrypt,
1780                        .decrypt = aead_decrypt,
1781                        .givencrypt = aead_givencrypt,
1782                        .geniv = "<built-in>",
1783                        .ivsize = DES3_EDE_BLOCK_SIZE,
1784                        .maxauthsize = MD5_DIGEST_SIZE,
1785                        },
1786                .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1787                .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
1788                .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1789        },
1790        {
1791                .name = "authenc(hmac(sha1),cbc(des3_ede))",
1792                .driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
1793                .blocksize = DES3_EDE_BLOCK_SIZE,
1794                .type = CRYPTO_ALG_TYPE_AEAD,
1795                .template_aead = {
1796                        .setkey = aead_setkey,
1797                        .setauthsize = aead_setauthsize,
1798                        .encrypt = aead_encrypt,
1799                        .decrypt = aead_decrypt,
1800                        .givencrypt = aead_givencrypt,
1801                        .geniv = "<built-in>",
1802                        .ivsize = DES3_EDE_BLOCK_SIZE,
1803                        .maxauthsize = SHA1_DIGEST_SIZE,
1804                        },
1805                .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1806                .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1807                .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1808        },
1809        {
1810                .name = "authenc(hmac(sha224),cbc(des3_ede))",
1811                .driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
1812                .blocksize = DES3_EDE_BLOCK_SIZE,
                .type = CRYPTO_ALG_TYPE_AEAD,
1813                .template_aead = {
1814                        .setkey = aead_setkey,
1815                        .setauthsize = aead_setauthsize,
1816                        .encrypt = aead_encrypt,
1817                        .decrypt = aead_decrypt,
1818                        .givencrypt = aead_givencrypt,
1819                        .geniv = "<built-in>",
1820                        .ivsize = DES3_EDE_BLOCK_SIZE,
1821                        .maxauthsize = SHA224_DIGEST_SIZE,
1822                        },
1823                .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1824                .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1825                                   OP_ALG_AAI_HMAC_PRECOMP,
1826                .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1827        },
1828        {
1829                .name = "authenc(hmac(sha256),cbc(des3_ede))",
1830                .driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
1831                .blocksize = DES3_EDE_BLOCK_SIZE,
1832                .type = CRYPTO_ALG_TYPE_AEAD,
1833                .template_aead = {
1834                        .setkey = aead_setkey,
1835                        .setauthsize = aead_setauthsize,
1836                        .encrypt = aead_encrypt,
1837                        .decrypt = aead_decrypt,
1838                        .givencrypt = aead_givencrypt,
1839                        .geniv = "<built-in>",
1840                        .ivsize = DES3_EDE_BLOCK_SIZE,
1841                        .maxauthsize = SHA256_DIGEST_SIZE,
1842                        },
1843                .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1844                .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1845                                   OP_ALG_AAI_HMAC_PRECOMP,
1846                .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1847        },
1848        {
1849                .name = "authenc(hmac(sha384),cbc(des3_ede))",
1850                .driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
1851                .blocksize = DES3_EDE_BLOCK_SIZE,
                .type = CRYPTO_ALG_TYPE_AEAD,
1852                .template_aead = {
1853                        .setkey = aead_setkey,
1854                        .setauthsize = aead_setauthsize,
1855                        .encrypt = aead_encrypt,
1856                        .decrypt = aead_decrypt,
1857                        .givencrypt = aead_givencrypt,
1858                        .geniv = "<built-in>",
1859                        .ivsize = DES3_EDE_BLOCK_SIZE,
1860                        .maxauthsize = SHA384_DIGEST_SIZE,
1861                        },
1862                .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1863                .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1864                                   OP_ALG_AAI_HMAC_PRECOMP,
1865                .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1866        },
1867        {
1868                .name = "authenc(hmac(sha512),cbc(des3_ede))",
1869                .driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
1870                .blocksize = DES3_EDE_BLOCK_SIZE,
1871                .type = CRYPTO_ALG_TYPE_AEAD,
1872                .template_aead = {
1873                        .setkey = aead_setkey,
1874                        .setauthsize = aead_setauthsize,
1875                        .encrypt = aead_encrypt,
1876                        .decrypt = aead_decrypt,
1877                        .givencrypt = aead_givencrypt,
1878                        .geniv = "<built-in>",
1879                        .ivsize = DES3_EDE_BLOCK_SIZE,
1880                        .maxauthsize = SHA512_DIGEST_SIZE,
1881                        },
1882                .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1883                .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
1884                                   OP_ALG_AAI_HMAC_PRECOMP,
1885                .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
1886        },
1887        {
1888                .name = "authenc(hmac(md5),cbc(des))",
1889                .driver_name = "authenc-hmac-md5-cbc-des-caam",
1890                .blocksize = DES_BLOCK_SIZE,
1891                .type = CRYPTO_ALG_TYPE_AEAD,
1892                .template_aead = {
1893                        .setkey = aead_setkey,
1894                        .setauthsize = aead_setauthsize,
1895                        .encrypt = aead_encrypt,
1896                        .decrypt = aead_decrypt,
1897                        .givencrypt = aead_givencrypt,
1898                        .geniv = "<built-in>",
1899                        .ivsize = DES_BLOCK_SIZE,
1900                        .maxauthsize = MD5_DIGEST_SIZE,
1901                        },
1902                .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1903                .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
1904                .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
1905        },
1906        {
1907                .name = "authenc(hmac(sha1),cbc(des))",
1908                .driver_name = "authenc-hmac-sha1-cbc-des-caam",
1909                .blocksize = DES_BLOCK_SIZE,
1910                .type = CRYPTO_ALG_TYPE_AEAD,
1911                .template_aead = {
1912                        .setkey = aead_setkey,
1913                        .setauthsize = aead_setauthsize,
1914                        .encrypt = aead_encrypt,
1915                        .decrypt = aead_decrypt,
1916                        .givencrypt = aead_givencrypt,
1917                        .geniv = "<built-in>",
1918                        .ivsize = DES_BLOCK_SIZE,
1919                        .maxauthsize = SHA1_DIGEST_SIZE,
1920                        },
1921                .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1922                .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
1923                .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
1924        },
1925        {
1926                .name = "authenc(hmac(sha224),cbc(des))",
1927                .driver_name = "authenc-hmac-sha224-cbc-des-caam",
1928                .blocksize = DES_BLOCK_SIZE,
                .type = CRYPTO_ALG_TYPE_AEAD,
1929                .template_aead = {
1930                        .setkey = aead_setkey,
1931                        .setauthsize = aead_setauthsize,
1932                        .encrypt = aead_encrypt,
1933                        .decrypt = aead_decrypt,
1934                        .givencrypt = aead_givencrypt,
1935                        .geniv = "<built-in>",
1936                        .ivsize = DES_BLOCK_SIZE,
1937                        .maxauthsize = SHA224_DIGEST_SIZE,
1938                        },
1939                .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1940                .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1941                                   OP_ALG_AAI_HMAC_PRECOMP,
1942                .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
1943        },
1944        {
1945                .name = "authenc(hmac(sha256),cbc(des))",
1946                .driver_name = "authenc-hmac-sha256-cbc-des-caam",
1947                .blocksize = DES_BLOCK_SIZE,
1948                .type = CRYPTO_ALG_TYPE_AEAD,
1949                .template_aead = {
1950                        .setkey = aead_setkey,
1951                        .setauthsize = aead_setauthsize,
1952                        .encrypt = aead_encrypt,
1953                        .decrypt = aead_decrypt,
1954                        .givencrypt = aead_givencrypt,
1955                        .geniv = "<built-in>",
1956                        .ivsize = DES_BLOCK_SIZE,
1957                        .maxauthsize = SHA256_DIGEST_SIZE,
1958                        },
1959                .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1960                .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
1961                                   OP_ALG_AAI_HMAC_PRECOMP,
1962                .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
1963        },
1964        {
1965                .name = "authenc(hmac(sha384),cbc(des))",
1966                .driver_name = "authenc-hmac-sha384-cbc-des-caam",
1967                .blocksize = DES_BLOCK_SIZE,
                .type = CRYPTO_ALG_TYPE_AEAD,
1968                .template_aead = {
1969                        .setkey = aead_setkey,
1970                        .setauthsize = aead_setauthsize,
1971                        .encrypt = aead_encrypt,
1972                        .decrypt = aead_decrypt,
1973                        .givencrypt = aead_givencrypt,
1974                        .geniv = "<built-in>",
1975                        .ivsize = DES_BLOCK_SIZE,
1976                        .maxauthsize = SHA384_DIGEST_SIZE,
1977                        },
1978                .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1979                .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
1980                                   OP_ALG_AAI_HMAC_PRECOMP,
1981                .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
1982        },
1983        {
1984                .name = "authenc(hmac(sha512),cbc(des))",
1985                .driver_name = "authenc-hmac-sha512-cbc-des-caam",
1986                .blocksize = DES_BLOCK_SIZE,
1987                .type = CRYPTO_ALG_TYPE_AEAD,
1988                .template_aead = {
1989                        .setkey = aead_setkey,
1990                        .setauthsize = aead_setauthsize,
1991                        .encrypt = aead_encrypt,
1992                        .decrypt = aead_decrypt,
1993                        .givencrypt = aead_givencrypt,
1994                        .geniv = "<built-in>",
1995                        .ivsize = DES_BLOCK_SIZE,
1996                        .maxauthsize = SHA512_DIGEST_SIZE,
1997                        },
1998                .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1999                .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2000                                   OP_ALG_AAI_HMAC_PRECOMP,
2001                .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
2002        },
2003        /* ablkcipher descriptor */
2004        {
2005                .name = "cbc(aes)",
2006                .driver_name = "cbc-aes-caam",
2007                .blocksize = AES_BLOCK_SIZE,
2008                .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2009                .template_ablkcipher = {
2010                        .setkey = ablkcipher_setkey,
2011                        .encrypt = ablkcipher_encrypt,
2012                        .decrypt = ablkcipher_decrypt,
2013                        .geniv = "eseqiv",
2014                        .min_keysize = AES_MIN_KEY_SIZE,
2015                        .max_keysize = AES_MAX_KEY_SIZE,
2016                        .ivsize = AES_BLOCK_SIZE,
2017                        },
2018                .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2019        },
2020        {
2021                .name = "cbc(des3_ede)",
2022                .driver_name = "cbc-3des-caam",
2023                .blocksize = DES3_EDE_BLOCK_SIZE,
2024                .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2025                .template_ablkcipher = {
2026                        .setkey = ablkcipher_setkey,
2027                        .encrypt = ablkcipher_encrypt,
2028                        .decrypt = ablkcipher_decrypt,
2029                        .geniv = "eseqiv",
2030                        .min_keysize = DES3_EDE_KEY_SIZE,
2031                        .max_keysize = DES3_EDE_KEY_SIZE,
2032                        .ivsize = DES3_EDE_BLOCK_SIZE,
2033                        },
2034                .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2035        },
2036        {
2037                .name = "cbc(des)",
2038                .driver_name = "cbc-des-caam",
2039                .blocksize = DES_BLOCK_SIZE,
2040                .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2041                .template_ablkcipher = {
2042                        .setkey = ablkcipher_setkey,
2043                        .encrypt = ablkcipher_encrypt,
2044                        .decrypt = ablkcipher_decrypt,
2045                        .geniv = "eseqiv",
2046                        .min_keysize = DES_KEY_SIZE,
2047                        .max_keysize = DES_KEY_SIZE,
2048                        .ivsize = DES_BLOCK_SIZE,
2049                        },
2050                .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2051        }
2052};
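
/*
 * Name-resolution sketch (illustrative): CAAM_CRA_PRIORITY (3000) is
 * high enough to outrank the generic software implementations, so a
 * lookup by plain algorithm name lands on this driver once registered:
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
 *	if (!IS_ERR(tfm))
 *		pr_info("using %s\n",
 *			crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)));
 *
 * which should print "using authenc-hmac-sha1-cbc-aes-caam".
 */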
2053
2054struct caam_crypto_alg {
2055        struct list_head entry;
2056        struct device *ctrldev;
2057        int class1_alg_type;
2058        int class2_alg_type;
2059        int alg_op;
2060        struct crypto_alg crypto_alg;
2061};
2062
2063static int caam_cra_init(struct crypto_tfm *tfm)
2064{
2065        struct crypto_alg *alg = tfm->__crt_alg;
2066        struct caam_crypto_alg *caam_alg =
2067                 container_of(alg, struct caam_crypto_alg, crypto_alg);
2068        struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2069        struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev);
2070        int tgt_jr = atomic_inc_return(&priv->tfm_count);
2071
2072        /*
2073         * distribute tfms across job rings to ensure in-order
2074         * crypto request processing per tfm
2075         */
2076        ctx->jrdev = priv->jrdev[(tgt_jr / 2) % priv->total_jobrs];
2077
2078        /* copy descriptor header template value */
2079        ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
2080        ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
2081        ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;
2082
2083        return 0;
2084}
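
/*
 * Worked example of the ring assignment above: with total_jobrs == 2,
 * successive tfms see tgt_jr = 0, 1, 2, 3, 4, ..., so (tgt_jr / 2) %
 * total_jobrs selects rings 0, 0, 1, 1, 0, ...; pairs of tfms share a
 * ring, and a given tfm always enqueues to the same ring, which is
 * what keeps its requests in order.
 */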
2085
2086static void caam_cra_exit(struct crypto_tfm *tfm)
2087{
2088        struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2089
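        /*
         * Shared descriptors are created and DMA-mapped only once
         * setkey() has run; the checks below guard against a tfm freed
         * before any key was set and against mappings that failed.
         */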
2090        if (ctx->sh_desc_enc_dma &&
2091            !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
2092                dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
2093                                 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
2094        if (ctx->sh_desc_dec_dma &&
2095            !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
2096                dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
2097                                 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
2098        if (ctx->sh_desc_givenc_dma &&
2099            !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
2100                dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
2101                                 desc_bytes(ctx->sh_desc_givenc),
2102                                 DMA_TO_DEVICE);
2103}
2104
2105static void __exit caam_algapi_exit(void)
2106{
2108        struct device_node *dev_node;
2109        struct platform_device *pdev;
2110        struct device *ctrldev;
2111        struct caam_drv_private *priv;
2112        struct caam_crypto_alg *t_alg, *n;
2113
2114        dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
2115        if (!dev_node) {
2116                dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
2117                if (!dev_node)
2118                        return;
2119        }
2120
2121        pdev = of_find_device_by_node(dev_node);
2122        if (!pdev) {
                of_node_put(dev_node);
2123                return;
        }
2124
2125        ctrldev = &pdev->dev;
2126        of_node_put(dev_node);
2127        priv = dev_get_drvdata(ctrldev);
2128
2129        if (!priv->alg_list.next)
2130                return;
2131
2132        list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
2133                crypto_unregister_alg(&t_alg->crypto_alg);
2134                list_del(&t_alg->entry);
2135                kfree(t_alg);
2136        }
2137}
2138
2139static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
2140                                              struct caam_alg_template
2141                                              *template)
2142{
2143        struct caam_crypto_alg *t_alg;
2144        struct crypto_alg *alg;
2145
2146        t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
2147        if (!t_alg) {
2148                dev_err(ctrldev, "failed to allocate t_alg\n");
2149                return ERR_PTR(-ENOMEM);
2150        }
2151
2152        alg = &t_alg->crypto_alg;
2153
2154        snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
2155        snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2156                 template->driver_name);
2157        alg->cra_module = THIS_MODULE;
2158        alg->cra_init = caam_cra_init;
2159        alg->cra_exit = caam_cra_exit;
2160        alg->cra_priority = CAAM_CRA_PRIORITY;
2161        alg->cra_blocksize = template->blocksize;
2162        alg->cra_alignmask = 0;
2163        alg->cra_ctxsize = sizeof(struct caam_ctx);
2164        alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
2165                         template->type;
2166        switch (template->type) {
2167        case CRYPTO_ALG_TYPE_ABLKCIPHER:
2168                alg->cra_type = &crypto_ablkcipher_type;
2169                alg->cra_ablkcipher = template->template_ablkcipher;
2170                break;
2171        case CRYPTO_ALG_TYPE_AEAD:
2172                alg->cra_type = &crypto_aead_type;
2173                alg->cra_aead = template->template_aead;
2174                break;
2175        }
2176
2177        t_alg->class1_alg_type = template->class1_alg_type;
2178        t_alg->class2_alg_type = template->class2_alg_type;
2179        t_alg->alg_op = template->alg_op;
2180        t_alg->ctrldev = ctrldev;
2181
2182        return t_alg;
2183}
2184
2185static int __init caam_algapi_init(void)
2186{
2187        struct device_node *dev_node;
2188        struct platform_device *pdev;
2189        struct device *ctrldev;
2190        struct caam_drv_private *priv;
2191        int i = 0, err = 0;
2192
2193        dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
2194        if (!dev_node) {
2195                dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
2196                if (!dev_node)
2197                        return -ENODEV;
2198        }
2199
2200        pdev = of_find_device_by_node(dev_node);
2201        if (!pdev) {
                of_node_put(dev_node);
2202                return -ENODEV;
        }
2203
2204        ctrldev = &pdev->dev;
2205        priv = dev_get_drvdata(ctrldev);
2207
2208        INIT_LIST_HEAD(&priv->alg_list);
2209
2210        atomic_set(&priv->tfm_count, -1);
2211
2212        /* register crypto algorithms the device supports */
2213        for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2214                /* TODO: check if h/w supports alg */
2215                struct caam_crypto_alg *t_alg;
2216
2217                t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
2218                if (IS_ERR(t_alg)) {
2219                        err = PTR_ERR(t_alg);
2220                        dev_warn(ctrldev, "%s alg allocation failed\n",
2221                                 driver_algs[i].driver_name);
2222                        continue;
2223                }
2224
2225                err = crypto_register_alg(&t_alg->crypto_alg);
2226                if (err) {
2227                        dev_warn(ctrldev, "%s alg registration failed\n",
2228                                t_alg->crypto_alg.cra_driver_name);
2229                        kfree(t_alg);
2230                } else {
2231                        list_add_tail(&t_alg->entry, &priv->alg_list);
                }
2232        }
2233        if (!list_empty(&priv->alg_list))
2234                dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n",
2235                         (char *)of_get_property(dev_node, "compatible", NULL));
        /* dev_node was still needed just above; drop the reference now */
        of_node_put(dev_node);
2236
2237        return err;
2238}
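
/*
 * After successful registration each transform is visible in
 * /proc/crypto; the cbc-aes entry would read roughly as follows
 * (illustrative):
 *
 *	name         : cbc(aes)
 *	driver       : cbc-aes-caam
 *	priority     : 3000
 *	refcnt       : 1
 *	selftest     : passed
 *	type         : ablkcipher
 *	async        : yes
 *	blocksize    : 16
 *	min keysize  : 16
 *	max keysize  : 32
 *	ivsize       : 16
 *	geniv        : eseqiv
 */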
2239
2240module_init(caam_algapi_init);
2241module_exit(caam_algapi_exit);
2242
2243MODULE_LICENSE("GPL");
2244MODULE_DESCRIPTION("FSL CAAM support for crypto API");
2245MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
2246