linux/drivers/crypto/keembay/keembay-ocs-aes-core.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel Keem Bay OCS AES Crypto Driver.
 *
 * Copyright (C) 2018-2020 Intel Corporation
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/types.h>

#include <crypto/aes.h>
#include <crypto/engine.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>

#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>

#include "ocs-aes.h"

#define KMB_OCS_PRIORITY        350
#define DRV_NAME                "keembay-ocs-aes"

#define OCS_AES_MIN_KEY_SIZE    16
#define OCS_AES_MAX_KEY_SIZE    32
#define OCS_AES_KEYSIZE_128     16
#define OCS_AES_KEYSIZE_192     24
#define OCS_AES_KEYSIZE_256     32
#define OCS_SM4_KEY_SIZE        16

/**
 * struct ocs_aes_tctx - OCS AES Transform context
 * @engine_ctx:         Engine context.
 * @aes_dev:            The OCS AES device.
 * @key:                AES/SM4 key.
 * @key_len:            The length (in bytes) of @key.
 * @cipher:             OCS cipher to use (either AES or SM4).
 * @sw_cipher:          The cipher to use as fallback.
 * @use_fallback:       Whether or not fallback cipher should be used.
 */
struct ocs_aes_tctx {
        struct crypto_engine_ctx engine_ctx;
        struct ocs_aes_dev *aes_dev;
        u8 key[OCS_AES_KEYSIZE_256];
        unsigned int key_len;
        enum ocs_cipher cipher;
        union {
                struct crypto_sync_skcipher *sk;
                struct crypto_aead *aead;
        } sw_cipher;
        bool use_fallback;
};

/**
 * struct ocs_aes_rctx - OCS AES Request context.
 * @instruction:        Instruction to be executed (encrypt / decrypt).
 * @mode:               Mode to use (ECB, CBC, CTR, CCM, GCM, CTS).
 * @src_nents:          Number of source SG entries.
 * @dst_nents:          Number of destination SG entries.
 * @src_dma_count:      The number of DMA-mapped entries of the source SG.
 * @dst_dma_count:      The number of DMA-mapped entries of the destination SG.
 * @in_place:           Whether or not this is an in place request, i.e.,
 *                      src_sg == dst_sg.
 * @src_dll:            OCS DMA linked list for input data.
 * @dst_dll:            OCS DMA linked list for output data.
 * @last_ct_blk:        Buffer to hold last cipher text block (only used in CBC
 *                      mode).
 * @cts_swap:           Whether or not CTS swap must be performed.
 * @aad_src_dll:        OCS DMA linked list for input AAD data.
 * @aad_dst_dll:        OCS DMA linked list for output AAD data.
 * @in_tag:             Buffer to hold input encrypted tag (only used for
 *                      CCM/GCM decrypt).
 * @out_tag:            Buffer to hold output encrypted / decrypted tag (only
 *                      used for GCM encrypt / decrypt).
 */
struct ocs_aes_rctx {
        /* Fields common across all modes. */
        enum ocs_instruction    instruction;
        enum ocs_mode           mode;
        int                     src_nents;
        int                     dst_nents;
        int                     src_dma_count;
        int                     dst_dma_count;
        bool                    in_place;
        struct ocs_dll_desc     src_dll;
        struct ocs_dll_desc     dst_dll;

        /* CBC specific */
        u8                      last_ct_blk[AES_BLOCK_SIZE];

        /* CTS specific */
        int                     cts_swap;

        /* CCM/GCM specific */
        struct ocs_dll_desc     aad_src_dll;
        struct ocs_dll_desc     aad_dst_dll;
        u8                      in_tag[AES_BLOCK_SIZE];

        /* GCM specific */
        u8                      out_tag[AES_BLOCK_SIZE];
};

/* Driver data. */
struct ocs_aes_drv {
        struct list_head dev_list;
        spinlock_t lock;        /* Protects dev_list. */
};

static struct ocs_aes_drv ocs_aes = {
        .dev_list = LIST_HEAD_INIT(ocs_aes.dev_list),
        .lock = __SPIN_LOCK_UNLOCKED(ocs_aes.lock),
};

static struct ocs_aes_dev *kmb_ocs_aes_find_dev(struct ocs_aes_tctx *tctx)
{
        struct ocs_aes_dev *aes_dev;

        spin_lock(&ocs_aes.lock);

        if (tctx->aes_dev) {
                aes_dev = tctx->aes_dev;
                goto exit;
        }

        /* Only a single OCS device available */
        aes_dev = list_first_entry(&ocs_aes.dev_list, struct ocs_aes_dev, list);
        tctx->aes_dev = aes_dev;

exit:
        spin_unlock(&ocs_aes.lock);

        return aes_dev;
}

/*
 * Ensure key is 128-bit or 256-bit for AES or 128-bit for SM4 and an actual
 * key is being passed in.
 *
 * Return: 0 if key is valid, -EINVAL otherwise.
 */
static int check_key(const u8 *in_key, size_t key_len, enum ocs_cipher cipher)
{
        if (!in_key)
                return -EINVAL;
        /* For AES, only 128-bit and 256-bit keys are supported. */
        if (cipher == OCS_AES && (key_len == OCS_AES_KEYSIZE_128 ||
                                  key_len == OCS_AES_KEYSIZE_256))
                return 0;

        /* For SM4, only 128-bit keys are supported. */
        if (cipher == OCS_SM4 && key_len == OCS_SM4_KEY_SIZE)
                return 0;

        /* Everything else is unsupported. */
        return -EINVAL;
}

/* Save key into transformation context. */
static int save_key(struct ocs_aes_tctx *tctx, const u8 *in_key, size_t key_len,
                    enum ocs_cipher cipher)
{
        int ret;

        ret = check_key(in_key, key_len, cipher);
        if (ret)
                return ret;

        memcpy(tctx->key, in_key, key_len);
        tctx->key_len = key_len;
        tctx->cipher = cipher;

        return 0;
}

/* Set key for symmetric cipher. */
static int kmb_ocs_sk_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
                              size_t key_len, enum ocs_cipher cipher)
{
        struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);

        /* Fallback is used for AES with 192-bit key. */
        tctx->use_fallback = (cipher == OCS_AES &&
                              key_len == OCS_AES_KEYSIZE_192);

        if (!tctx->use_fallback)
                return save_key(tctx, in_key, key_len, cipher);

        crypto_sync_skcipher_clear_flags(tctx->sw_cipher.sk,
                                         CRYPTO_TFM_REQ_MASK);
        crypto_sync_skcipher_set_flags(tctx->sw_cipher.sk,
                                       tfm->base.crt_flags &
                                       CRYPTO_TFM_REQ_MASK);

        return crypto_sync_skcipher_setkey(tctx->sw_cipher.sk, in_key, key_len);
}

/* Set key for AEAD cipher. */
static int kmb_ocs_aead_set_key(struct crypto_aead *tfm, const u8 *in_key,
                                size_t key_len, enum ocs_cipher cipher)
{
        struct ocs_aes_tctx *tctx = crypto_aead_ctx(tfm);

        /* Fallback is used for AES with 192-bit key. */
        tctx->use_fallback = (cipher == OCS_AES &&
                              key_len == OCS_AES_KEYSIZE_192);

        if (!tctx->use_fallback)
                return save_key(tctx, in_key, key_len, cipher);

        crypto_aead_clear_flags(tctx->sw_cipher.aead, CRYPTO_TFM_REQ_MASK);
        crypto_aead_set_flags(tctx->sw_cipher.aead,
                              crypto_aead_get_flags(tfm) & CRYPTO_TFM_REQ_MASK);

        return crypto_aead_setkey(tctx->sw_cipher.aead, in_key, key_len);
}

/* Swap two AES blocks in SG lists. */
static void sg_swap_blocks(struct scatterlist *sgl, unsigned int nents,
                           off_t blk1_offset, off_t blk2_offset)
{
        u8 tmp_buf1[AES_BLOCK_SIZE], tmp_buf2[AES_BLOCK_SIZE];

        /*
         * No easy way to copy within sg list, so copy both blocks to temporary
         * buffers first.
         */
        sg_pcopy_to_buffer(sgl, nents, tmp_buf1, AES_BLOCK_SIZE, blk1_offset);
        sg_pcopy_to_buffer(sgl, nents, tmp_buf2, AES_BLOCK_SIZE, blk2_offset);
        sg_pcopy_from_buffer(sgl, nents, tmp_buf1, AES_BLOCK_SIZE, blk2_offset);
        sg_pcopy_from_buffer(sgl, nents, tmp_buf2, AES_BLOCK_SIZE, blk1_offset);
}

/* Initialize request context to default values. */
static void ocs_aes_init_rctx(struct ocs_aes_rctx *rctx)
{
        /* Zero everything. */
        memset(rctx, 0, sizeof(*rctx));

        /* Set initial value for DMA addresses. */
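        /* DMA_MAPPING_ERROR marks the linked lists as not yet DMA-mapped. */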
        rctx->src_dll.dma_addr = DMA_MAPPING_ERROR;
        rctx->dst_dll.dma_addr = DMA_MAPPING_ERROR;
        rctx->aad_src_dll.dma_addr = DMA_MAPPING_ERROR;
        rctx->aad_dst_dll.dma_addr = DMA_MAPPING_ERROR;
}

static int kmb_ocs_sk_validate_input(struct skcipher_request *req,
                                     enum ocs_mode mode)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        int iv_size = crypto_skcipher_ivsize(tfm);

        switch (mode) {
        case OCS_MODE_ECB:
                /* Ensure input length is multiple of block size */
                if (req->cryptlen % AES_BLOCK_SIZE != 0)
                        return -EINVAL;

                return 0;

        case OCS_MODE_CBC:
                /* Ensure input length is multiple of block size */
                if (req->cryptlen % AES_BLOCK_SIZE != 0)
                        return -EINVAL;

                /* Ensure IV is present and block size in length */
                if (!req->iv || iv_size != AES_BLOCK_SIZE)
                        return -EINVAL;
                /*
                 * NOTE: Since req->cryptlen == 0 case was already handled in
                 * kmb_ocs_sk_common(), the above two conditions also guarantee
                 * that: cryptlen >= iv_size
                 */
                return 0;

        case OCS_MODE_CTR:
                /* Ensure IV is present and block size in length */
                if (!req->iv || iv_size != AES_BLOCK_SIZE)
                        return -EINVAL;
                return 0;

        case OCS_MODE_CTS:
                /* Ensure input length >= block size */
                if (req->cryptlen < AES_BLOCK_SIZE)
                        return -EINVAL;

                /* Ensure IV is present and block size in length */
                if (!req->iv || iv_size != AES_BLOCK_SIZE)
                        return -EINVAL;

                return 0;
        default:
                return -EINVAL;
        }
}

/*
 * Called by encrypt() / decrypt() skcipher functions.
 *
 * Use fallback if needed, otherwise initialize context and enqueue request
 * into engine.
 */
static int kmb_ocs_sk_common(struct skcipher_request *req,
                             enum ocs_cipher cipher,
                             enum ocs_instruction instruction,
                             enum ocs_mode mode)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct ocs_aes_rctx *rctx = skcipher_request_ctx(req);
        struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
        struct ocs_aes_dev *aes_dev;
        int rc;

        if (tctx->use_fallback) {
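                /*
                 * AES with a 192-bit key is not handled by the OCS hardware,
                 * so process the request synchronously with the software
                 * fallback skcipher set up at setkey time.
                 */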
                SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, tctx->sw_cipher.sk);

                skcipher_request_set_sync_tfm(subreq, tctx->sw_cipher.sk);
                skcipher_request_set_callback(subreq, req->base.flags, NULL,
                                              NULL);
                skcipher_request_set_crypt(subreq, req->src, req->dst,
                                           req->cryptlen, req->iv);

                if (instruction == OCS_ENCRYPT)
                        rc = crypto_skcipher_encrypt(subreq);
                else
                        rc = crypto_skcipher_decrypt(subreq);

                skcipher_request_zero(subreq);

                return rc;
        }

        /*
         * If cryptlen == 0, no processing is needed for ECB, CBC and CTR.
         *
         * For CTS, continue: kmb_ocs_sk_validate_input() will return -EINVAL.
         */
        if (!req->cryptlen && mode != OCS_MODE_CTS)
                return 0;

        rc = kmb_ocs_sk_validate_input(req, mode);
        if (rc)
                return rc;

        aes_dev = kmb_ocs_aes_find_dev(tctx);
        if (!aes_dev)
                return -ENODEV;

        if (cipher != tctx->cipher)
                return -EINVAL;

        ocs_aes_init_rctx(rctx);
        rctx->instruction = instruction;
        rctx->mode = mode;

        return crypto_transfer_skcipher_request_to_engine(aes_dev->engine, req);
}

static void cleanup_ocs_dma_linked_list(struct device *dev,
                                        struct ocs_dll_desc *dll)
{
        if (dll->vaddr)
                dma_free_coherent(dev, dll->size, dll->vaddr, dll->dma_addr);
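        /* Reset the descriptor so that a repeated cleanup call is a no-op. */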
        dll->vaddr = NULL;
        dll->size = 0;
        dll->dma_addr = DMA_MAPPING_ERROR;
}

static void kmb_ocs_sk_dma_cleanup(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct ocs_aes_rctx *rctx = skcipher_request_ctx(req);
        struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
        struct device *dev = tctx->aes_dev->dev;

        if (rctx->src_dma_count) {
                dma_unmap_sg(dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
                rctx->src_dma_count = 0;
        }

        if (rctx->dst_dma_count) {
                dma_unmap_sg(dev, req->dst, rctx->dst_nents, rctx->in_place ?
                                                             DMA_BIDIRECTIONAL :
                                                             DMA_FROM_DEVICE);
                rctx->dst_dma_count = 0;
        }

        /* Clean up OCS DMA linked lists */
        cleanup_ocs_dma_linked_list(dev, &rctx->src_dll);
        cleanup_ocs_dma_linked_list(dev, &rctx->dst_dll);
}

static int kmb_ocs_sk_prepare_inplace(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct ocs_aes_rctx *rctx = skcipher_request_ctx(req);
        struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
        int iv_size = crypto_skcipher_ivsize(tfm);
        int rc;

        /*
         * For CBC decrypt, save last block (iv) to last_ct_blk buffer.
         *
         * Note: if we are here, we already checked that cryptlen >= iv_size
         * and iv_size == AES_BLOCK_SIZE (i.e., the size of last_ct_blk); see
         * kmb_ocs_sk_validate_input().
         */
        if (rctx->mode == OCS_MODE_CBC && rctx->instruction == OCS_DECRYPT)
                scatterwalk_map_and_copy(rctx->last_ct_blk, req->src,
                                         req->cryptlen - iv_size, iv_size, 0);

        /* For CTS decrypt, swap last two blocks, if needed. */
        if (rctx->cts_swap && rctx->instruction == OCS_DECRYPT)
                sg_swap_blocks(req->dst, rctx->dst_nents,
                               req->cryptlen - AES_BLOCK_SIZE,
                               req->cryptlen - (2 * AES_BLOCK_SIZE));

        /* src and dst buffers are the same, use bidirectional DMA mapping. */
        rctx->dst_dma_count = dma_map_sg(tctx->aes_dev->dev, req->dst,
                                         rctx->dst_nents, DMA_BIDIRECTIONAL);
        if (rctx->dst_dma_count == 0) {
                dev_err(tctx->aes_dev->dev, "Failed to map destination sg\n");
                return -ENOMEM;
        }

        /* Create DST linked list */
        rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
                                            rctx->dst_dma_count, &rctx->dst_dll,
                                            req->cryptlen, 0);
        if (rc)
                return rc;
        /*
         * If descriptor creation was successful, set the src_dll.dma_addr to
         * the value of dst_dll.dma_addr, as we do in-place AES operation on
         * the src.
         */
        rctx->src_dll.dma_addr = rctx->dst_dll.dma_addr;

        return 0;
}

static int kmb_ocs_sk_prepare_notinplace(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct ocs_aes_rctx *rctx = skcipher_request_ctx(req);
        struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
        int rc;

        rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen);
        if (rctx->src_nents < 0)
                return -EBADMSG;

        /* Map SRC SG. */
        rctx->src_dma_count = dma_map_sg(tctx->aes_dev->dev, req->src,
                                         rctx->src_nents, DMA_TO_DEVICE);
        if (rctx->src_dma_count == 0) {
                dev_err(tctx->aes_dev->dev, "Failed to map source sg\n");
                return -ENOMEM;
        }

        /* Create SRC linked list */
        rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->src,
                                            rctx->src_dma_count, &rctx->src_dll,
                                            req->cryptlen, 0);
        if (rc)
                return rc;

        /* Map DST SG. */
        rctx->dst_dma_count = dma_map_sg(tctx->aes_dev->dev, req->dst,
                                         rctx->dst_nents, DMA_FROM_DEVICE);
        if (rctx->dst_dma_count == 0) {
                dev_err(tctx->aes_dev->dev, "Failed to map destination sg\n");
                return -ENOMEM;
        }

        /* Create DST linked list */
        rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
                                            rctx->dst_dma_count, &rctx->dst_dll,
                                            req->cryptlen, 0);
        if (rc)
                return rc;

        /* If this is not a CTS decrypt operation with swapping, we are done. */
        if (!(rctx->cts_swap && rctx->instruction == OCS_DECRYPT))
                return 0;

        /*
         * Otherwise, we have to copy src to dst (as we cannot modify src).
         * Use OCS AES bypass mode to copy src to dst via DMA.
         *
         * NOTE: for anything other than small data sizes this is rather
         * inefficient.
         */
        rc = ocs_aes_bypass_op(tctx->aes_dev, rctx->dst_dll.dma_addr,
                               rctx->src_dll.dma_addr, req->cryptlen);
        if (rc)
                return rc;

        /*
         * Now dst == src, so clean up what we did so far and use in_place
         * logic.
         */
        kmb_ocs_sk_dma_cleanup(req);
        rctx->in_place = true;

        return kmb_ocs_sk_prepare_inplace(req);
}

static int kmb_ocs_sk_run(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct ocs_aes_rctx *rctx = skcipher_request_ctx(req);
        struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
        struct ocs_aes_dev *aes_dev = tctx->aes_dev;
        int iv_size = crypto_skcipher_ivsize(tfm);
        int rc;

        rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
        if (rctx->dst_nents < 0)
                return -EBADMSG;

        /*
         * If the input is two or more blocks long and a multiple of the block
         * size, swap the last two blocks to be compatible with other crypto
         * API CTS implementations: OCS mode uses CBC-CS2, whereas other
         * crypto API implementations use CBC-CS3.
         * CBC-CS2 and CBC-CS3 are defined in:
         * https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38a-add.pdf
         */
        rctx->cts_swap = (rctx->mode == OCS_MODE_CTS &&
                          req->cryptlen > AES_BLOCK_SIZE &&
                          req->cryptlen % AES_BLOCK_SIZE == 0);
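        /*
         * Example: with cryptlen == 3 * AES_BLOCK_SIZE, the blocks at offsets
         * cryptlen - 2 * AES_BLOCK_SIZE and cryptlen - AES_BLOCK_SIZE (i.e.,
         * the last two) are the ones exchanged by sg_swap_blocks().
         */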

        rctx->in_place = (req->src == req->dst);

        if (rctx->in_place)
                rc = kmb_ocs_sk_prepare_inplace(req);
        else
                rc = kmb_ocs_sk_prepare_notinplace(req);

        if (rc)
                goto error;

        rc = ocs_aes_op(aes_dev, rctx->mode, tctx->cipher, rctx->instruction,
                        rctx->dst_dll.dma_addr, rctx->src_dll.dma_addr,
                        req->cryptlen, req->iv, iv_size);
        if (rc)
                goto error;

        /* Clean up DMA before further processing the output. */
        kmb_ocs_sk_dma_cleanup(req);

        /* For CTS encrypt, swap the last two blocks, if needed. */
        if (rctx->cts_swap && rctx->instruction == OCS_ENCRYPT) {
                sg_swap_blocks(req->dst, rctx->dst_nents,
                               req->cryptlen - AES_BLOCK_SIZE,
                               req->cryptlen - (2 * AES_BLOCK_SIZE));
                return 0;
        }

        /* For CBC, copy the last ciphertext block to req->iv. */
        if (rctx->mode == OCS_MODE_CBC) {
                /* CBC encrypt case. */
                if (rctx->instruction == OCS_ENCRYPT) {
                        scatterwalk_map_and_copy(req->iv, req->dst,
                                                 req->cryptlen - iv_size,
                                                 iv_size, 0);
                        return 0;
                }
                /* CBC decrypt case. */
                if (rctx->in_place)
                        memcpy(req->iv, rctx->last_ct_blk, iv_size);
                else
                        scatterwalk_map_and_copy(req->iv, req->src,
                                                 req->cryptlen - iv_size,
                                                 iv_size, 0);
                return 0;
        }
        /* For all other modes there's nothing to do. */

        return 0;

error:
        kmb_ocs_sk_dma_cleanup(req);

        return rc;
}

static int kmb_ocs_aead_validate_input(struct aead_request *req,
                                       enum ocs_instruction instruction,
                                       enum ocs_mode mode)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        int tag_size = crypto_aead_authsize(tfm);
        int iv_size = crypto_aead_ivsize(tfm);

        /* For decrypt, cryptlen == len(CT) + len(tag). */
        if (instruction == OCS_DECRYPT && req->cryptlen < tag_size)
                return -EINVAL;

        /* IV is mandatory. */
        if (!req->iv)
                return -EINVAL;

        switch (mode) {
        case OCS_MODE_GCM:
                if (iv_size != GCM_AES_IV_SIZE)
                        return -EINVAL;

                return 0;

        case OCS_MODE_CCM:
                /* Ensure IV is block size in length (presence checked above). */
                if (iv_size != AES_BLOCK_SIZE)
                        return -EINVAL;

                return 0;

        default:
                return -EINVAL;
        }
}

/*
 * Called by encrypt() / decrypt() aead functions.
 *
 * Use fallback if needed, otherwise initialize context and enqueue request
 * into engine.
 */
static int kmb_ocs_aead_common(struct aead_request *req,
                               enum ocs_cipher cipher,
                               enum ocs_instruction instruction,
                               enum ocs_mode mode)
{
        struct ocs_aes_tctx *tctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
        struct ocs_aes_rctx *rctx = aead_request_ctx(req);
        struct ocs_aes_dev *dd;
        int rc;

        if (tctx->use_fallback) {
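                /*
                 * The request context was sized in ocs_aes_aead_cra_init() to
                 * also fit an aead_request for the fallback cipher, so it can
                 * be reused here as the fallback sub-request.
                 */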
                struct aead_request *subreq = aead_request_ctx(req);

                aead_request_set_tfm(subreq, tctx->sw_cipher.aead);
                aead_request_set_callback(subreq, req->base.flags,
                                          req->base.complete, req->base.data);
                aead_request_set_crypt(subreq, req->src, req->dst,
                                       req->cryptlen, req->iv);
                aead_request_set_ad(subreq, req->assoclen);
                rc = crypto_aead_setauthsize(tctx->sw_cipher.aead,
                                             crypto_aead_authsize(crypto_aead_reqtfm(req)));
                if (rc)
                        return rc;

                return (instruction == OCS_ENCRYPT) ?
                       crypto_aead_encrypt(subreq) :
                       crypto_aead_decrypt(subreq);
        }

        rc = kmb_ocs_aead_validate_input(req, instruction, mode);
        if (rc)
                return rc;

        dd = kmb_ocs_aes_find_dev(tctx);
        if (!dd)
                return -ENODEV;

        if (cipher != tctx->cipher)
                return -EINVAL;

        ocs_aes_init_rctx(rctx);
        rctx->instruction = instruction;
        rctx->mode = mode;

        return crypto_transfer_aead_request_to_engine(dd->engine, req);
}

static void kmb_ocs_aead_dma_cleanup(struct aead_request *req)
{
        struct ocs_aes_tctx *tctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
        struct ocs_aes_rctx *rctx = aead_request_ctx(req);
        struct device *dev = tctx->aes_dev->dev;

        if (rctx->src_dma_count) {
                dma_unmap_sg(dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
                rctx->src_dma_count = 0;
        }

        if (rctx->dst_dma_count) {
                dma_unmap_sg(dev, req->dst, rctx->dst_nents, rctx->in_place ?
                                                             DMA_BIDIRECTIONAL :
                                                             DMA_FROM_DEVICE);
                rctx->dst_dma_count = 0;
        }
        /* Clean up OCS DMA linked lists */
        cleanup_ocs_dma_linked_list(dev, &rctx->src_dll);
        cleanup_ocs_dma_linked_list(dev, &rctx->dst_dll);
        cleanup_ocs_dma_linked_list(dev, &rctx->aad_src_dll);
        cleanup_ocs_dma_linked_list(dev, &rctx->aad_dst_dll);
}

/**
 * kmb_ocs_aead_dma_prepare() - Do DMA mapping for AEAD processing.
 * @req:                The AEAD request being processed.
 * @src_dll_size:       Where to store the length of the data mapped into the
 *                      src_dll OCS DMA list.
 *
 * Do the following:
 * - DMA map req->src and req->dst
 * - Initialize the following OCS DMA linked lists: rctx->src_dll,
 *   rctx->dst_dll, rctx->aad_src_dll and rctx->aad_dst_dll.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int kmb_ocs_aead_dma_prepare(struct aead_request *req, u32 *src_dll_size)
{
        struct ocs_aes_tctx *tctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
        const int tag_size = crypto_aead_authsize(crypto_aead_reqtfm(req));
        struct ocs_aes_rctx *rctx = aead_request_ctx(req);
        u32 in_size;    /* The length of the data to be mapped by src_dll. */
        u32 out_size;   /* The length of the data to be mapped by dst_dll. */
        u32 dst_size;   /* The length of the data in dst_sg. */
        int rc;

        /* Get number of entries in input data SG list. */
        rctx->src_nents = sg_nents_for_len(req->src,
                                           req->assoclen + req->cryptlen);
        if (rctx->src_nents < 0)
                return -EBADMSG;

        if (rctx->instruction == OCS_DECRYPT) {
                /*
                 * For decrypt:
                 * - src sg list is:            AAD|CT|tag
                 * - dst sg list expects:       AAD|PT
                 *
                 * in_size == len(CT); out_size == len(PT)
                 */

                /* req->cryptlen includes both CT and tag. */
                in_size = req->cryptlen - tag_size;

                /* out_size = PT size == CT size */
                out_size = in_size;

                /* len(dst_sg) == len(AAD) + len(PT) */
                dst_size = req->assoclen + out_size;

                /*
                 * Copy tag from source SG list to 'in_tag' buffer.
                 *
                 * Note: this needs to be done here, before DMA mapping src_sg.
                 */
                sg_pcopy_to_buffer(req->src, rctx->src_nents, rctx->in_tag,
                                   tag_size, req->assoclen + in_size);

        } else { /* OCS_ENCRYPT */
                /*
                 * For encrypt:
                 *      src sg list is:         AAD|PT
                 *      dst sg list expects:    AAD|CT|tag
                 */
                /* in_size == len(PT) */
                in_size = req->cryptlen;

                /*
                 * In CCM mode the OCS engine appends the tag to the
                 * ciphertext, but in GCM mode the tag must be read from the
                 * tag registers and appended manually below.
                 */
                out_size = (rctx->mode == OCS_MODE_CCM) ? in_size + tag_size :
                                                          in_size;
                /* len(dst_sg) == len(AAD) + len(CT) + len(tag) */
                dst_size = req->assoclen + in_size + tag_size;
        }
        *src_dll_size = in_size;

        /* Get number of entries in output data SG list. */
        rctx->dst_nents = sg_nents_for_len(req->dst, dst_size);
        if (rctx->dst_nents < 0)
                return -EBADMSG;

        rctx->in_place = (req->src == req->dst) ? 1 : 0;

        /* Map destination; use bidirectional mapping for in-place case. */
        rctx->dst_dma_count = dma_map_sg(tctx->aes_dev->dev, req->dst,
                                         rctx->dst_nents,
                                         rctx->in_place ? DMA_BIDIRECTIONAL :
                                                          DMA_FROM_DEVICE);
        if (rctx->dst_dma_count == 0 && rctx->dst_nents != 0) {
                dev_err(tctx->aes_dev->dev, "Failed to map destination sg\n");
                return -ENOMEM;
        }

        /* Create AAD DST list: maps dst[0:AAD_SIZE-1]. */
        rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
                                            rctx->dst_dma_count,
                                            &rctx->aad_dst_dll, req->assoclen,
                                            0);
        if (rc)
                return rc;

        /* Create DST list: maps dst[AAD_SIZE:out_size] */
        rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
                                            rctx->dst_dma_count, &rctx->dst_dll,
                                            out_size, req->assoclen);
        if (rc)
                return rc;

        if (rctx->in_place) {
                /* If this is not CCM encrypt, we are done. */
                if (!(rctx->mode == OCS_MODE_CCM &&
                      rctx->instruction == OCS_ENCRYPT)) {
                        /*
                         * SRC and DST are the same, so re-use the same DMA
                         * addresses (to avoid allocating new DMA lists
                         * identical to the dst ones).
                         */
                        rctx->src_dll.dma_addr = rctx->dst_dll.dma_addr;
                        rctx->aad_src_dll.dma_addr = rctx->aad_dst_dll.dma_addr;

                        return 0;
                }
                /*
                 * For CCM encrypt the input and output linked lists contain
                 * different amounts of data, so, we need to create different
                 * SRC and AAD SRC lists, even for the in-place case.
                 */
                rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
                                                    rctx->dst_dma_count,
                                                    &rctx->aad_src_dll,
                                                    req->assoclen, 0);
                if (rc)
                        return rc;
                rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
                                                    rctx->dst_dma_count,
                                                    &rctx->src_dll, in_size,
                                                    req->assoclen);
                if (rc)
                        return rc;

                return 0;
        }
        /* Not in-place case. */

        /* Map source SG. */
        rctx->src_dma_count = dma_map_sg(tctx->aes_dev->dev, req->src,
                                         rctx->src_nents, DMA_TO_DEVICE);
        if (rctx->src_dma_count == 0 && rctx->src_nents != 0) {
                dev_err(tctx->aes_dev->dev, "Failed to map source sg\n");
                return -ENOMEM;
        }

        /* Create AAD SRC list. */
        rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->src,
                                            rctx->src_dma_count,
                                            &rctx->aad_src_dll,
                                            req->assoclen, 0);
        if (rc)
                return rc;

        /* Create SRC list. */
        rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->src,
                                            rctx->src_dma_count,
                                            &rctx->src_dll, in_size,
                                            req->assoclen);
        if (rc)
                return rc;

        if (req->assoclen == 0)
                return 0;

        /* Copy AAD from src sg to dst sg using OCS DMA. */
        rc = ocs_aes_bypass_op(tctx->aes_dev, rctx->aad_dst_dll.dma_addr,
                               rctx->aad_src_dll.dma_addr, req->assoclen);
        if (rc)
                dev_err(tctx->aes_dev->dev,
                        "Failed to copy source AAD to destination AAD\n");

        return rc;
}

static int kmb_ocs_aead_run(struct aead_request *req)
{
        struct ocs_aes_tctx *tctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
        const int tag_size = crypto_aead_authsize(crypto_aead_reqtfm(req));
        struct ocs_aes_rctx *rctx = aead_request_ctx(req);
        u32 in_size;    /* The length of the data mapped by src_dll. */
        int rc;

        rc = kmb_ocs_aead_dma_prepare(req, &in_size);
        if (rc)
                goto exit;

        /* For CCM, we just call the OCS processing and we are done. */
        if (rctx->mode == OCS_MODE_CCM) {
                rc = ocs_aes_ccm_op(tctx->aes_dev, tctx->cipher,
                                    rctx->instruction, rctx->dst_dll.dma_addr,
                                    rctx->src_dll.dma_addr, in_size,
                                    req->iv,
                                    rctx->aad_src_dll.dma_addr, req->assoclen,
                                    rctx->in_tag, tag_size);
                goto exit;
        }
        /* GCM case; invoke OCS processing. */
        rc = ocs_aes_gcm_op(tctx->aes_dev, tctx->cipher,
                            rctx->instruction,
                            rctx->dst_dll.dma_addr,
                            rctx->src_dll.dma_addr, in_size,
                            req->iv,
                            rctx->aad_src_dll.dma_addr, req->assoclen,
                            rctx->out_tag, tag_size);
        if (rc)
                goto exit;

        /* For GCM decrypt, we have to compare in_tag with out_tag. */
        if (rctx->instruction == OCS_DECRYPT) {
                rc = memcmp(rctx->in_tag, rctx->out_tag, tag_size) ?
                     -EBADMSG : 0;
                goto exit;
        }

        /* For GCM encrypt, we must manually copy out_tag to DST sg. */

        /* Clean-up must be called before the sg_pcopy_from_buffer() below. */
        kmb_ocs_aead_dma_cleanup(req);

        /* Copy tag to destination sg after AAD and CT. */
        sg_pcopy_from_buffer(req->dst, rctx->dst_nents, rctx->out_tag,
                             tag_size, req->assoclen + req->cryptlen);

        /* Return directly as DMA cleanup already done. */
        return 0;

exit:
        kmb_ocs_aead_dma_cleanup(req);

        return rc;
}

static int kmb_ocs_aes_sk_do_one_request(struct crypto_engine *engine,
                                         void *areq)
{
        struct skcipher_request *req =
                        container_of(areq, struct skcipher_request, base);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
        int err;

        if (!tctx->aes_dev) {
                err = -ENODEV;
                goto exit;
        }

        err = ocs_aes_set_key(tctx->aes_dev, tctx->key_len, tctx->key,
                              tctx->cipher);
        if (err)
                goto exit;

        err = kmb_ocs_sk_run(req);

exit:
        crypto_finalize_skcipher_request(engine, req, err);

        return 0;
}

static int kmb_ocs_aes_aead_do_one_request(struct crypto_engine *engine,
                                           void *areq)
{
        struct aead_request *req = container_of(areq,
                                                struct aead_request, base);
        struct ocs_aes_tctx *tctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
        int err;

        if (!tctx->aes_dev)
                return -ENODEV;

        err = ocs_aes_set_key(tctx->aes_dev, tctx->key_len, tctx->key,
                              tctx->cipher);
        if (err)
                goto exit;

        err = kmb_ocs_aead_run(req);

exit:
        crypto_finalize_aead_request(tctx->aes_dev->engine, req, err);

        return 0;
}

static int kmb_ocs_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
                               unsigned int key_len)
{
        return kmb_ocs_sk_set_key(tfm, in_key, key_len, OCS_AES);
}

static int kmb_ocs_aes_aead_set_key(struct crypto_aead *tfm, const u8 *in_key,
                                    unsigned int key_len)
{
        return kmb_ocs_aead_set_key(tfm, in_key, key_len, OCS_AES);
}

#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
static int kmb_ocs_aes_ecb_encrypt(struct skcipher_request *req)
{
        return kmb_ocs_sk_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_ECB);
}

static int kmb_ocs_aes_ecb_decrypt(struct skcipher_request *req)
{
        return kmb_ocs_sk_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_ECB);
}
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */

static int kmb_ocs_aes_cbc_encrypt(struct skcipher_request *req)
{
        return kmb_ocs_sk_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_CBC);
}

static int kmb_ocs_aes_cbc_decrypt(struct skcipher_request *req)
{
        return kmb_ocs_sk_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_CBC);
}

static int kmb_ocs_aes_ctr_encrypt(struct skcipher_request *req)
{
        return kmb_ocs_sk_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_CTR);
}

static int kmb_ocs_aes_ctr_decrypt(struct skcipher_request *req)
{
        return kmb_ocs_sk_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_CTR);
}

#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
static int kmb_ocs_aes_cts_encrypt(struct skcipher_request *req)
{
        return kmb_ocs_sk_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_CTS);
}

static int kmb_ocs_aes_cts_decrypt(struct skcipher_request *req)
{
        return kmb_ocs_sk_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_CTS);
}
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */

static int kmb_ocs_aes_gcm_encrypt(struct aead_request *req)
{
        return kmb_ocs_aead_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_GCM);
}

static int kmb_ocs_aes_gcm_decrypt(struct aead_request *req)
{
        return kmb_ocs_aead_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_GCM);
}

static int kmb_ocs_aes_ccm_encrypt(struct aead_request *req)
{
        return kmb_ocs_aead_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_CCM);
}

static int kmb_ocs_aes_ccm_decrypt(struct aead_request *req)
{
        return kmb_ocs_aead_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_CCM);
}

static int kmb_ocs_sm4_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
                               unsigned int key_len)
{
        return kmb_ocs_sk_set_key(tfm, in_key, key_len, OCS_SM4);
}

static int kmb_ocs_sm4_aead_set_key(struct crypto_aead *tfm, const u8 *in_key,
                                    unsigned int key_len)
{
        return kmb_ocs_aead_set_key(tfm, in_key, key_len, OCS_SM4);
}

#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
static int kmb_ocs_sm4_ecb_encrypt(struct skcipher_request *req)
{
        return kmb_ocs_sk_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_ECB);
}

static int kmb_ocs_sm4_ecb_decrypt(struct skcipher_request *req)
{
        return kmb_ocs_sk_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_ECB);
}
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */

static int kmb_ocs_sm4_cbc_encrypt(struct skcipher_request *req)
{
        return kmb_ocs_sk_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_CBC);
}

static int kmb_ocs_sm4_cbc_decrypt(struct skcipher_request *req)
{
        return kmb_ocs_sk_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_CBC);
}

static int kmb_ocs_sm4_ctr_encrypt(struct skcipher_request *req)
{
        return kmb_ocs_sk_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_CTR);
}

static int kmb_ocs_sm4_ctr_decrypt(struct skcipher_request *req)
{
        return kmb_ocs_sk_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_CTR);
}

#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
static int kmb_ocs_sm4_cts_encrypt(struct skcipher_request *req)
{
        return kmb_ocs_sk_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_CTS);
}

static int kmb_ocs_sm4_cts_decrypt(struct skcipher_request *req)
{
        return kmb_ocs_sk_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_CTS);
}
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */

static int kmb_ocs_sm4_gcm_encrypt(struct aead_request *req)
{
        return kmb_ocs_aead_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_GCM);
}

static int kmb_ocs_sm4_gcm_decrypt(struct aead_request *req)
{
        return kmb_ocs_aead_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_GCM);
}

static int kmb_ocs_sm4_ccm_encrypt(struct aead_request *req)
{
        return kmb_ocs_aead_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_CCM);
}

static int kmb_ocs_sm4_ccm_decrypt(struct aead_request *req)
{
        return kmb_ocs_aead_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_CCM);
}

static inline int ocs_common_init(struct ocs_aes_tctx *tctx)
{
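        /*
         * Only the do_one_request() hook is needed; the engine's optional
         * prepare/unprepare callbacks are left unset.
         */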
        tctx->engine_ctx.op.prepare_request = NULL;
        tctx->engine_ctx.op.do_one_request = kmb_ocs_aes_sk_do_one_request;
        tctx->engine_ctx.op.unprepare_request = NULL;

        return 0;
}

static int ocs_aes_init_tfm(struct crypto_skcipher *tfm)
{
        const char *alg_name = crypto_tfm_alg_name(&tfm->base);
        struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
        struct crypto_sync_skcipher *blk;

        /* Set fallback cipher in case it will be needed. */
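        /*
         * Passing CRYPTO_ALG_NEED_FALLBACK as the mask ensures that the
         * selected fallback does not itself require a fallback.
         */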
        blk = crypto_alloc_sync_skcipher(alg_name, 0, CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(blk))
                return PTR_ERR(blk);

        tctx->sw_cipher.sk = blk;

        crypto_skcipher_set_reqsize(tfm, sizeof(struct ocs_aes_rctx));

        return ocs_common_init(tctx);
}

static int ocs_sm4_init_tfm(struct crypto_skcipher *tfm)
{
        struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);

        crypto_skcipher_set_reqsize(tfm, sizeof(struct ocs_aes_rctx));

        return ocs_common_init(tctx);
}

static inline void clear_key(struct ocs_aes_tctx *tctx)
{
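        /* memzero_explicit() cannot be optimized away by the compiler. */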
        memzero_explicit(tctx->key, OCS_AES_KEYSIZE_256);

        /* Zero key registers if set */
        if (tctx->aes_dev)
                ocs_aes_set_key(tctx->aes_dev, OCS_AES_KEYSIZE_256,
                                tctx->key, OCS_AES);
}

static void ocs_exit_tfm(struct crypto_skcipher *tfm)
{
        struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);

        clear_key(tctx);

        if (tctx->sw_cipher.sk) {
                crypto_free_sync_skcipher(tctx->sw_cipher.sk);
                tctx->sw_cipher.sk = NULL;
        }
}

static inline int ocs_common_aead_init(struct ocs_aes_tctx *tctx)
{
        tctx->engine_ctx.op.prepare_request = NULL;
        tctx->engine_ctx.op.do_one_request = kmb_ocs_aes_aead_do_one_request;
        tctx->engine_ctx.op.unprepare_request = NULL;

        return 0;
}

static int ocs_aes_aead_cra_init(struct crypto_aead *tfm)
{
        const char *alg_name = crypto_tfm_alg_name(&tfm->base);
        struct ocs_aes_tctx *tctx = crypto_aead_ctx(tfm);
        struct crypto_aead *blk;

        /* Set fallback cipher in case it will be needed */
        blk = crypto_alloc_aead(alg_name, 0, CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(blk))
                return PTR_ERR(blk);

        tctx->sw_cipher.aead = blk;

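        /*
         * The request context must be large enough to hold either our own
         * rctx or an aead_request for the fallback cipher.
         */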
        crypto_aead_set_reqsize(tfm,
                                max(sizeof(struct ocs_aes_rctx),
                                    (sizeof(struct aead_request) +
                                     crypto_aead_reqsize(tctx->sw_cipher.aead))));

        return ocs_common_aead_init(tctx);
}

static int kmb_ocs_aead_ccm_setauthsize(struct crypto_aead *tfm,
                                        unsigned int authsize)
{
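        /* Valid CCM tag lengths, per NIST SP 800-38C, are the even sizes 4-16. */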
1242        switch (authsize) {
1243        case 4:
1244        case 6:
1245        case 8:
1246        case 10:
1247        case 12:
1248        case 14:
1249        case 16:
1250                return 0;
1251        default:
1252                return -EINVAL;
1253        }
1254}
1255
1256static int kmb_ocs_aead_gcm_setauthsize(struct crypto_aead *tfm,
1257                                        unsigned int authsize)
1258{
1259        return crypto_gcm_check_authsize(authsize);
1260}
1261
1262static int ocs_sm4_aead_cra_init(struct crypto_aead *tfm)
1263{
1264        struct ocs_aes_tctx *tctx = crypto_aead_ctx(tfm);
1265
1266        crypto_aead_set_reqsize(tfm, sizeof(struct ocs_aes_rctx));
1267
1268        return ocs_common_aead_init(tctx);
1269}
1270
1271static void ocs_aead_cra_exit(struct crypto_aead *tfm)
1272{
1273        struct ocs_aes_tctx *tctx = crypto_aead_ctx(tfm);
1274
1275        clear_key(tctx);
1276
1277        if (tctx->sw_cipher.aead) {
1278                crypto_free_aead(tctx->sw_cipher.aead);
1279                tctx->sw_cipher.aead = NULL;
1280        }
1281}
1282
1283static struct skcipher_alg algs[] = {
1284#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
1285        {
1286                .base.cra_name = "ecb(aes)",
1287                .base.cra_driver_name = "ecb-aes-keembay-ocs",
1288                .base.cra_priority = KMB_OCS_PRIORITY,
1289                .base.cra_flags = CRYPTO_ALG_ASYNC |
1290                                  CRYPTO_ALG_KERN_DRIVER_ONLY |
1291                                  CRYPTO_ALG_NEED_FALLBACK,
1292                .base.cra_blocksize = AES_BLOCK_SIZE,
1293                .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
1294                .base.cra_module = THIS_MODULE,
1295                .base.cra_alignmask = 0,
1296
1297                .min_keysize = OCS_AES_MIN_KEY_SIZE,
1298                .max_keysize = OCS_AES_MAX_KEY_SIZE,
1299                .setkey = kmb_ocs_aes_set_key,
1300                .encrypt = kmb_ocs_aes_ecb_encrypt,
1301                .decrypt = kmb_ocs_aes_ecb_decrypt,
1302                .init = ocs_aes_init_tfm,
1303                .exit = ocs_exit_tfm,
1304        },
1305#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
1306        {
1307                .base.cra_name = "cbc(aes)",
1308                .base.cra_driver_name = "cbc-aes-keembay-ocs",
1309                .base.cra_priority = KMB_OCS_PRIORITY,
1310                .base.cra_flags = CRYPTO_ALG_ASYNC |
1311                                  CRYPTO_ALG_KERN_DRIVER_ONLY |
1312                                  CRYPTO_ALG_NEED_FALLBACK,
1313                .base.cra_blocksize = AES_BLOCK_SIZE,
1314                .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
1315                .base.cra_module = THIS_MODULE,
1316                .base.cra_alignmask = 0,
1317
1318                .min_keysize = OCS_AES_MIN_KEY_SIZE,
1319                .max_keysize = OCS_AES_MAX_KEY_SIZE,
1320                .ivsize = AES_BLOCK_SIZE,
1321                .setkey = kmb_ocs_aes_set_key,
1322                .encrypt = kmb_ocs_aes_cbc_encrypt,
1323                .decrypt = kmb_ocs_aes_cbc_decrypt,
1324                .init = ocs_aes_init_tfm,
1325                .exit = ocs_exit_tfm,
1326        },
1327        {
1328                .base.cra_name = "ctr(aes)",
1329                .base.cra_driver_name = "ctr-aes-keembay-ocs",
1330                .base.cra_priority = KMB_OCS_PRIORITY,
1331                .base.cra_flags = CRYPTO_ALG_ASYNC |
1332                                  CRYPTO_ALG_KERN_DRIVER_ONLY |
1333                                  CRYPTO_ALG_NEED_FALLBACK,
1334                .base.cra_blocksize = 1,
1335                .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
1336                .base.cra_module = THIS_MODULE,
1337                .base.cra_alignmask = 0,
1338
1339                .min_keysize = OCS_AES_MIN_KEY_SIZE,
1340                .max_keysize = OCS_AES_MAX_KEY_SIZE,
1341                .ivsize = AES_BLOCK_SIZE,
1342                .setkey = kmb_ocs_aes_set_key,
1343                .encrypt = kmb_ocs_aes_ctr_encrypt,
1344                .decrypt = kmb_ocs_aes_ctr_decrypt,
1345                .init = ocs_aes_init_tfm,
1346                .exit = ocs_exit_tfm,
1347        },
1348#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
1349        {
1350                .base.cra_name = "cts(cbc(aes))",
1351                .base.cra_driver_name = "cts-aes-keembay-ocs",
1352                .base.cra_priority = KMB_OCS_PRIORITY,
1353                .base.cra_flags = CRYPTO_ALG_ASYNC |
1354                                  CRYPTO_ALG_KERN_DRIVER_ONLY |
1355                                  CRYPTO_ALG_NEED_FALLBACK,
1356                .base.cra_blocksize = AES_BLOCK_SIZE,
1357                .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
1358                .base.cra_module = THIS_MODULE,
1359                .base.cra_alignmask = 0,
1360
1361                .min_keysize = OCS_AES_MIN_KEY_SIZE,
1362                .max_keysize = OCS_AES_MAX_KEY_SIZE,
1363                .ivsize = AES_BLOCK_SIZE,
1364                .setkey = kmb_ocs_aes_set_key,
1365                .encrypt = kmb_ocs_aes_cts_encrypt,
1366                .decrypt = kmb_ocs_aes_cts_decrypt,
1367                .init = ocs_aes_init_tfm,
1368                .exit = ocs_exit_tfm,
1369        },
1370#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
1371#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
1372        {
1373                .base.cra_name = "ecb(sm4)",
1374                .base.cra_driver_name = "ecb-sm4-keembay-ocs",
1375                .base.cra_priority = KMB_OCS_PRIORITY,
1376                .base.cra_flags = CRYPTO_ALG_ASYNC |
1377                                  CRYPTO_ALG_KERN_DRIVER_ONLY,
1378                .base.cra_blocksize = AES_BLOCK_SIZE,
1379                .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
1380                .base.cra_module = THIS_MODULE,
1381                .base.cra_alignmask = 0,
1382
1383                .min_keysize = OCS_SM4_KEY_SIZE,
1384                .max_keysize = OCS_SM4_KEY_SIZE,
1385                .setkey = kmb_ocs_sm4_set_key,
1386                .encrypt = kmb_ocs_sm4_ecb_encrypt,
1387                .decrypt = kmb_ocs_sm4_ecb_decrypt,
1388                .init = ocs_sm4_init_tfm,
1389                .exit = ocs_exit_tfm,
1390        },
1391#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
1392        {
1393                .base.cra_name = "cbc(sm4)",
1394                .base.cra_driver_name = "cbc-sm4-keembay-ocs",
1395                .base.cra_priority = KMB_OCS_PRIORITY,
1396                .base.cra_flags = CRYPTO_ALG_ASYNC |
1397                                  CRYPTO_ALG_KERN_DRIVER_ONLY,
1398                .base.cra_blocksize = AES_BLOCK_SIZE,
1399                .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
1400                .base.cra_module = THIS_MODULE,
1401                .base.cra_alignmask = 0,
1402
1403                .min_keysize = OCS_SM4_KEY_SIZE,
1404                .max_keysize = OCS_SM4_KEY_SIZE,
1405                .ivsize = AES_BLOCK_SIZE,
1406                .setkey = kmb_ocs_sm4_set_key,
1407                .encrypt = kmb_ocs_sm4_cbc_encrypt,
1408                .decrypt = kmb_ocs_sm4_cbc_decrypt,
1409                .init = ocs_sm4_init_tfm,
1410                .exit = ocs_exit_tfm,
1411        },
1412        {
1413                .base.cra_name = "ctr(sm4)",
1414                .base.cra_driver_name = "ctr-sm4-keembay-ocs",
1415                .base.cra_priority = KMB_OCS_PRIORITY,
1416                .base.cra_flags = CRYPTO_ALG_ASYNC |
1417                                  CRYPTO_ALG_KERN_DRIVER_ONLY,
1418                .base.cra_blocksize = 1,
1419                .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
1420                .base.cra_module = THIS_MODULE,
1421                .base.cra_alignmask = 0,
1422
1423                .min_keysize = OCS_SM4_KEY_SIZE,
1424                .max_keysize = OCS_SM4_KEY_SIZE,
1425                .ivsize = AES_BLOCK_SIZE,
1426                .setkey = kmb_ocs_sm4_set_key,
1427                .encrypt = kmb_ocs_sm4_ctr_encrypt,
1428                .decrypt = kmb_ocs_sm4_ctr_decrypt,
1429                .init = ocs_sm4_init_tfm,
1430                .exit = ocs_exit_tfm,
1431        },
1432#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
1433        {
1434                .base.cra_name = "cts(cbc(sm4))",
1435                .base.cra_driver_name = "cts-sm4-keembay-ocs",
1436                .base.cra_priority = KMB_OCS_PRIORITY,
1437                .base.cra_flags = CRYPTO_ALG_ASYNC |
1438                                  CRYPTO_ALG_KERN_DRIVER_ONLY,
1439                .base.cra_blocksize = AES_BLOCK_SIZE,
1440                .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
1441                .base.cra_module = THIS_MODULE,
1442                .base.cra_alignmask = 0,
1443
1444                .min_keysize = OCS_SM4_KEY_SIZE,
1445                .max_keysize = OCS_SM4_KEY_SIZE,
1446                .ivsize = AES_BLOCK_SIZE,
1447                .setkey = kmb_ocs_sm4_set_key,
1448                .encrypt = kmb_ocs_sm4_cts_encrypt,
1449                .decrypt = kmb_ocs_sm4_cts_decrypt,
1450                .init = ocs_sm4_init_tfm,
1451                .exit = ocs_exit_tfm,
1452        }
1453#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
1454};
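/*
 * Illustrative sketch, not part of the driver proper: how a kernel consumer
 * could exercise one of the skciphers registered above, here "cbc(aes)".
 * The function name and the all-zero demo key/IV/buffer are hypothetical;
 * only the crypto API calls are real. Assumes <linux/slab.h> and
 * <linux/scatterlist.h> are reachable (most kernel code pulls them in
 * transitively).
 */
static int __maybe_unused kmb_ocs_example_cbc_aes(void)
{
        static const u8 demo_key[OCS_AES_KEYSIZE_256];  /* all-zero demo key */
        u8 iv[AES_BLOCK_SIZE] = { 0 };                  /* demo IV */
        struct skcipher_request *req = NULL;
        struct crypto_skcipher *tfm;
        struct scatterlist sg;
        DECLARE_CRYPTO_WAIT(wait);
        u8 *buf = NULL;
        int err;

        /* If its cra_priority wins, the OCS hardware backs this tfm. */
        tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_skcipher_setkey(tfm, demo_key, sizeof(demo_key));
        if (err)
                goto out;

        /* One block, transformed in place; never DMA from the stack. */
        buf = kzalloc(AES_BLOCK_SIZE, GFP_KERNEL);
        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!buf || !req) {
                err = -ENOMEM;
                goto out;
        }

        sg_init_one(&sg, buf, AES_BLOCK_SIZE);
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                      CRYPTO_TFM_REQ_MAY_SLEEP,
                                      crypto_req_done, &wait);
        skcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);

        /* These algs are CRYPTO_ALG_ASYNC, so wait for the completion. */
        err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
out:
        skcipher_request_free(req);
        kfree(buf);
        crypto_free_skcipher(tfm);
        return err;
}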
1455
1456static struct aead_alg algs_aead[] = {
1457        {
1458                .base = {
1459                        .cra_name = "gcm(aes)",
1460                        .cra_driver_name = "gcm-aes-keembay-ocs",
1461                        .cra_priority = KMB_OCS_PRIORITY,
1462                        .cra_flags = CRYPTO_ALG_ASYNC |
1463                                     CRYPTO_ALG_KERN_DRIVER_ONLY |
1464                                     CRYPTO_ALG_NEED_FALLBACK,
1465                        .cra_blocksize = 1,
1466                        .cra_ctxsize = sizeof(struct ocs_aes_tctx),
1467                        .cra_alignmask = 0,
1468                        .cra_module = THIS_MODULE,
1469                },
1470                .init = ocs_aes_aead_cra_init,
1471                .exit = ocs_aead_cra_exit,
1472                .ivsize = GCM_AES_IV_SIZE,
1473                .maxauthsize = AES_BLOCK_SIZE,
1474                .setauthsize = kmb_ocs_aead_gcm_setauthsize,
1475                .setkey = kmb_ocs_aes_aead_set_key,
1476                .encrypt = kmb_ocs_aes_gcm_encrypt,
1477                .decrypt = kmb_ocs_aes_gcm_decrypt,
1478        },
1479        {
1480                .base = {
1481                        .cra_name = "ccm(aes)",
1482                        .cra_driver_name = "ccm-aes-keembay-ocs",
1483                        .cra_priority = KMB_OCS_PRIORITY,
1484                        .cra_flags = CRYPTO_ALG_ASYNC |
1485                                     CRYPTO_ALG_KERN_DRIVER_ONLY |
1486                                     CRYPTO_ALG_NEED_FALLBACK,
1487                        .cra_blocksize = 1,
1488                        .cra_ctxsize = sizeof(struct ocs_aes_tctx),
1489                        .cra_alignmask = 0,
1490                        .cra_module = THIS_MODULE,
1491                },
1492                .init = ocs_aes_aead_cra_init,
1493                .exit = ocs_aead_cra_exit,
1494                .ivsize = AES_BLOCK_SIZE,
1495                .maxauthsize = AES_BLOCK_SIZE,
1496                .setauthsize = kmb_ocs_aead_ccm_setauthsize,
1497                .setkey = kmb_ocs_aes_aead_set_key,
1498                .encrypt = kmb_ocs_aes_ccm_encrypt,
1499                .decrypt = kmb_ocs_aes_ccm_decrypt,
1500        },
1501        {
1502                .base = {
1503                        .cra_name = "gcm(sm4)",
1504                        .cra_driver_name = "gcm-sm4-keembay-ocs",
1505                        .cra_priority = KMB_OCS_PRIORITY,
1506                        .cra_flags = CRYPTO_ALG_ASYNC |
1507                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
1508                        .cra_blocksize = 1,
1509                        .cra_ctxsize = sizeof(struct ocs_aes_tctx),
1510                        .cra_alignmask = 0,
1511                        .cra_module = THIS_MODULE,
1512                },
1513                .init = ocs_sm4_aead_cra_init,
1514                .exit = ocs_aead_cra_exit,
1515                .ivsize = GCM_AES_IV_SIZE,
1516                .maxauthsize = AES_BLOCK_SIZE,
1517                .setauthsize = kmb_ocs_aead_gcm_setauthsize,
1518                .setkey = kmb_ocs_sm4_aead_set_key,
1519                .encrypt = kmb_ocs_sm4_gcm_encrypt,
1520                .decrypt = kmb_ocs_sm4_gcm_decrypt,
1521        },
1522        {
1523                .base = {
1524                        .cra_name = "ccm(sm4)",
1525                        .cra_driver_name = "ccm-sm4-keembay-ocs",
1526                        .cra_priority = KMB_OCS_PRIORITY,
1527                        .cra_flags = CRYPTO_ALG_ASYNC |
1528                                     CRYPTO_ALG_KERN_DRIVER_ONLY,
1529                        .cra_blocksize = 1,
1530                        .cra_ctxsize = sizeof(struct ocs_aes_tctx),
1531                        .cra_alignmask = 0,
1532                        .cra_module = THIS_MODULE,
1533                },
1534                .init = ocs_sm4_aead_cra_init,
1535                .exit = ocs_aead_cra_exit,
1536                .ivsize = AES_BLOCK_SIZE,
1537                .maxauthsize = AES_BLOCK_SIZE,
1538                .setauthsize = kmb_ocs_aead_ccm_setauthsize,
1539                .setkey = kmb_ocs_sm4_aead_set_key,
1540                .encrypt = kmb_ocs_sm4_ccm_encrypt,
1541                .decrypt = kmb_ocs_sm4_ccm_decrypt,
1542        }
1543};
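/*
 * Illustrative sketch, not part of the driver proper: one authenticated
 * encryption through the "gcm(aes)" AEAD registered above. The function
 * name and demo values are hypothetical; only the crypto API calls are
 * real. No AAD is used, so the in-place buffer is just one block of
 * plaintext followed by room for the 16-byte tag.
 */
static int __maybe_unused kmb_ocs_example_gcm_aes(void)
{
        static const u8 demo_key[OCS_AES_KEYSIZE_128];  /* all-zero demo key */
        u8 iv[GCM_AES_IV_SIZE] = { 0 };                 /* demo nonce */
        struct aead_request *req = NULL;
        struct crypto_aead *tfm;
        struct scatterlist sg;
        DECLARE_CRYPTO_WAIT(wait);
        u8 *buf = NULL;
        int err;

        tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_aead_setkey(tfm, demo_key, sizeof(demo_key));
        if (!err)
                err = crypto_aead_setauthsize(tfm, AES_BLOCK_SIZE);
        if (err)
                goto out;

        buf = kzalloc(2 * AES_BLOCK_SIZE, GFP_KERNEL);
        req = aead_request_alloc(tfm, GFP_KERNEL);
        if (!buf || !req) {
                err = -ENOMEM;
                goto out;
        }

        /* dst must have room for the ciphertext plus the auth tag. */
        sg_init_one(&sg, buf, 2 * AES_BLOCK_SIZE);
        aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                  CRYPTO_TFM_REQ_MAY_SLEEP,
                                  crypto_req_done, &wait);
        aead_request_set_ad(req, 0);
        aead_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);

        err = crypto_wait_req(crypto_aead_encrypt(req), &wait);
out:
        aead_request_free(req);
        kfree(buf);
        crypto_free_aead(tfm);
        return err;
}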
1544
1545static void unregister_aes_algs(struct ocs_aes_dev *aes_dev)
1546{
1547        crypto_unregister_aeads(algs_aead, ARRAY_SIZE(algs_aead));
1548        crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
1549}
1550
1551static int register_aes_algs(struct ocs_aes_dev *aes_dev)
1552{
1553        int ret;
1554
1555        /*
1556         * If any algorithm fails to register, all preceding algorithms that
1557         * were successfully registered will be automatically unregistered.
1558         */
1559        ret = crypto_register_aeads(algs_aead, ARRAY_SIZE(algs_aead));
1560        if (ret)
1561                return ret;
1562
1563        ret = crypto_register_skciphers(algs, ARRAY_SIZE(algs));
1564        if (ret)
1565                crypto_unregister_aeads(algs_aead, ARRAY_SIZE(algs_aead));
1566
1567        return ret;
1568}
1569
1570/* Device tree driver match. */
1571static const struct of_device_id kmb_ocs_aes_of_match[] = {
1572        {
1573                .compatible = "intel,keembay-ocs-aes",
1574        },
1575        {}
1576};
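/*
 * Illustrative sketch of a matching device-tree node (the unit address,
 * register range and interrupt specifier are placeholders, not taken from
 * a real board file):
 *
 *      crypto@30008000 {
 *              compatible = "intel,keembay-ocs-aes";
 *              reg = <0x30008000 0x1000>;
 *              interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
 *      };
 *
 * kmb_ocs_aes_probe() below consumes exactly these resources: one MMIO
 * region and one interrupt line.
 */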
1577
1578static int kmb_ocs_aes_remove(struct platform_device *pdev)
1579{
1580        struct ocs_aes_dev *aes_dev;
1581
1582        aes_dev = platform_get_drvdata(pdev);
1583        if (!aes_dev)
1584                return -ENODEV;
1585
1586        unregister_aes_algs(aes_dev);
1587
1588        spin_lock(&ocs_aes.lock);
1589        list_del(&aes_dev->list);
1590        spin_unlock(&ocs_aes.lock);
1591
1592        crypto_engine_exit(aes_dev->engine);
1593
1594        return 0;
1595}
1596
1597static int kmb_ocs_aes_probe(struct platform_device *pdev)
1598{
1599        struct device *dev = &pdev->dev;
1600        struct ocs_aes_dev *aes_dev;
1601        struct resource *aes_mem;
1602        int rc;
1603
1604        aes_dev = devm_kzalloc(dev, sizeof(*aes_dev), GFP_KERNEL);
1605        if (!aes_dev)
1606                return -ENOMEM;
1607
1608        aes_dev->dev = dev;
1609
1610        platform_set_drvdata(pdev, aes_dev);
1611
1612        rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
1613        if (rc) {
1614                dev_err(dev, "Failed to set 32 bit dma mask %d\n", rc);
1615                return rc;
1616        }
1617
1618        /* Get base register address. */
1619        aes_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1620        if (!aes_mem) {
1621                dev_err(dev, "Could not retrieve io mem resource\n");
1622                return -ENODEV;
1623        }
1624
1625        aes_dev->base_reg = devm_ioremap_resource(&pdev->dev, aes_mem);
1626        if (IS_ERR(aes_dev->base_reg)) {
1627                dev_err(dev, "Failed to get base address\n");
1628                return PTR_ERR(aes_dev->base_reg);
1629        }
1630
1631        /* Get and request IRQ */
1632        aes_dev->irq = platform_get_irq(pdev, 0);
1633        if (aes_dev->irq < 0)
1634                return aes_dev->irq;
1635
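        /*
         * Note: with a NULL thread_fn, devm_request_threaded_irq() behaves
         * like devm_request_irq(), i.e. ocs_aes_irq_handler() runs in
         * hard-IRQ context.
         */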
1636        rc = devm_request_threaded_irq(dev, aes_dev->irq, ocs_aes_irq_handler,
1637                                       NULL, 0, "keembay-ocs-aes", aes_dev);
1638        if (rc < 0) {
1639                dev_err(dev, "Could not request IRQ\n");
1640                return rc;
1641        }
1642
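        /*
         * Publish this device on the driver-wide list; the transform code
         * elsewhere in this file binds tfms to the first (and only) OCS
         * instance it finds here.
         */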
1643        INIT_LIST_HEAD(&aes_dev->list);
1644        spin_lock(&ocs_aes.lock);
1645        list_add_tail(&aes_dev->list, &ocs_aes.dev_list);
1646        spin_unlock(&ocs_aes.lock);
1647
1648        init_completion(&aes_dev->irq_completion);
1649
1650        /* Initialize crypto engine */
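        /* The 'true' asks for the engine kthread to run at realtime priority. */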
1651        aes_dev->engine = crypto_engine_alloc_init(dev, true);
1652        if (!aes_dev->engine) {
                    rc = -ENOMEM;
1653                goto list_del;
            }
1654
1655        rc = crypto_engine_start(aes_dev->engine);
1656        if (rc) {
1657                dev_err(dev, "Could not start crypto engine\n");
1658                goto cleanup;
1659        }
1660
1661        rc = register_aes_algs(aes_dev);
1662        if (rc) {
1663                dev_err(dev,
1664                        "Could not register OCS algorithms with Crypto API\n");
1665                goto cleanup;
1666        }
1667
1668        return 0;
1669
1670cleanup:
1671        crypto_engine_exit(aes_dev->engine);
1672list_del:
1673        spin_lock(&ocs_aes.lock);
1674        list_del(&aes_dev->list);
1675        spin_unlock(&ocs_aes.lock);
1676
1677        return rc;
1678}
1679
1680/* The OCS driver is a platform device. */
1681static struct platform_driver kmb_ocs_aes_driver = {
1682        .probe = kmb_ocs_aes_probe,
1683        .remove = kmb_ocs_aes_remove,
1684        .driver = {
1685                        .name = DRV_NAME,
1686                        .of_match_table = kmb_ocs_aes_of_match,
1687                },
1688};
1689
1690module_platform_driver(kmb_ocs_aes_driver);
1691
1692MODULE_DESCRIPTION("Intel Keem Bay Offload and Crypto Subsystem (OCS) AES/SM4 Driver");
1693MODULE_LICENSE("GPL");
1694
1695MODULE_ALIAS_CRYPTO("cbc-aes-keembay-ocs");
1696MODULE_ALIAS_CRYPTO("ctr-aes-keembay-ocs");
1697MODULE_ALIAS_CRYPTO("gcm-aes-keembay-ocs");
1698MODULE_ALIAS_CRYPTO("ccm-aes-keembay-ocs");
1699
1700MODULE_ALIAS_CRYPTO("cbc-sm4-keembay-ocs");
1701MODULE_ALIAS_CRYPTO("ctr-sm4-keembay-ocs");
1702MODULE_ALIAS_CRYPTO("gcm-sm4-keembay-ocs");
1703MODULE_ALIAS_CRYPTO("ccm-sm4-keembay-ocs");
1704
1705#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
1706MODULE_ALIAS_CRYPTO("ecb-aes-keembay-ocs");
1707MODULE_ALIAS_CRYPTO("ecb-sm4-keembay-ocs");
1708#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
1709
1710#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
1711MODULE_ALIAS_CRYPTO("cts-aes-keembay-ocs");
1712MODULE_ALIAS_CRYPTO("cts-sm4-keembay-ocs");
1713#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
1714