linux/drivers/crypto/gemini/sl3516-ce-cipher.c
// SPDX-License-Identifier: GPL-2.0
/*
 * sl3516-ce-cipher.c - hardware cryptographic offloader for Storlink SL3516 SoC
 *
 * Copyright (C) 2021 Corentin LABBE <clabbe@baylibre.com>
 *
 * This file adds support for the AES cipher with 128/192/256-bit keys in
 * ECB mode.
 */

#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/skcipher.h>
#include "sl3516-ce.h"

/* sl3516_ce_need_fallback - check if a request can be handled by the CE */
static bool sl3516_ce_need_fallback(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sl3516_ce_dev *ce = op->ce;
        struct scatterlist *in_sg = areq->src;
        struct scatterlist *out_sg = areq->dst;
        struct scatterlist *sg;

        if (areq->cryptlen == 0 || areq->cryptlen % 16) {
                ce->fallback_mod16++;
                return true;
        }

        /*
         * Check that we have enough descriptors for TX.
         * Note: TX needs one control descriptor for each SG.
         */
        if (sg_nents(areq->src) > MAXDESC / 2) {
                ce->fallback_sg_count_tx++;
                return true;
        }
        /* check if we have enough descriptors for RX */
        if (sg_nents(areq->dst) > MAXDESC) {
                ce->fallback_sg_count_rx++;
                return true;
        }

        sg = areq->src;
        while (sg) {
                if ((sg->length % 16) != 0) {
                        ce->fallback_mod16++;
                        return true;
                }
                if ((sg_dma_len(sg) % 16) != 0) {
                        ce->fallback_mod16++;
                        return true;
                }
                if (!IS_ALIGNED(sg->offset, 16)) {
                        ce->fallback_align16++;
                        return true;
                }
                sg = sg_next(sg);
        }
        sg = areq->dst;
        while (sg) {
                if ((sg->length % 16) != 0) {
                        ce->fallback_mod16++;
                        return true;
                }
                if ((sg_dma_len(sg) % 16) != 0) {
                        ce->fallback_mod16++;
                        return true;
                }
                if (!IS_ALIGNED(sg->offset, 16)) {
                        ce->fallback_align16++;
                        return true;
                }
                sg = sg_next(sg);
        }

        /* source and destination need the same number of SGs, with matching lengths */
        in_sg = areq->src;
        out_sg = areq->dst;
        while (in_sg && out_sg) {
                if (in_sg->length != out_sg->length) {
                        ce->fallback_not_same_len++;
                        return true;
                }
                in_sg = sg_next(in_sg);
                out_sg = sg_next(out_sg);
        }
        if (in_sg || out_sg)
                return true;

        return false;
}

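/*
 * sl3516_ce_cipher_fallback - handle the request with the software fallback
 * skcipher when the CE cannot process it directly
 */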
static int sl3516_ce_cipher_fallback(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
        struct sl3516_ce_alg_template *algt;
        int err;

        algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher);
        algt->stat_fb++;

        skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
        skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
                                      areq->base.complete, areq->base.data);
        skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
                                   areq->cryptlen, areq->iv);
        if (rctx->op_dir == CE_DECRYPTION)
                err = crypto_skcipher_decrypt(&rctx->fallback_req);
        else
                err = crypto_skcipher_encrypt(&rctx->fallback_req);
        return err;
}

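/*
 * sl3516_ce_cipher - DMA-map the request, build the source/destination
 * descriptor tables and the control packet, then run the CE task
 */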
static int sl3516_ce_cipher(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sl3516_ce_dev *ce = op->ce;
        struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
        struct sl3516_ce_alg_template *algt;
        struct scatterlist *sg;
        unsigned int todo, len;
        struct pkt_control_ecb *ecb;
        int nr_sgs = 0;
        int nr_sgd = 0;
        int err = 0;
        int i;

        algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher);

        dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
                crypto_tfm_alg_name(areq->base.tfm),
                areq->cryptlen,
                rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm),
                op->keylen);

        algt->stat_req++;

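        /* map the SGs for DMA; in-place requests share one bidirectional mapping */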
        if (areq->src == areq->dst) {
                nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
                                    DMA_BIDIRECTIONAL);
                if (nr_sgs <= 0 || nr_sgs > MAXDESC / 2) {
                        dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
                        err = -EINVAL;
                        goto theend;
                }
                nr_sgd = nr_sgs;
        } else {
                nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
                                    DMA_TO_DEVICE);
                if (nr_sgs <= 0 || nr_sgs > MAXDESC / 2) {
                        dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
                        err = -EINVAL;
                        goto theend;
                }
                nr_sgd = dma_map_sg(ce->dev, areq->dst, sg_nents(areq->dst),
                                    DMA_FROM_DEVICE);
                if (nr_sgd <= 0 || nr_sgd > MAXDESC) {
                        dev_err(ce->dev, "Invalid sg number %d\n", nr_sgd);
                        err = -EINVAL;
                        goto theend_sgs;
                }
        }

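        /* fill the source descriptor table from the DMA-mapped source SGs */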
        len = areq->cryptlen;
        i = 0;
        sg = areq->src;
        while (i < nr_sgs && sg && len) {
                if (sg_dma_len(sg) == 0)
                        goto sgs_next;
                rctx->t_src[i].addr = sg_dma_address(sg);
                todo = min(len, sg_dma_len(sg));
                rctx->t_src[i].len = todo;
                dev_dbg(ce->dev, "%s total=%u SGS(%d %u off=%d) todo=%u\n", __func__,
                        areq->cryptlen, i, rctx->t_src[i].len, sg->offset, todo);
                len -= todo;
                i++;
sgs_next:
                sg = sg_next(sg);
        }
        if (len > 0) {
                dev_err(ce->dev, "remaining len %d/%u nr_sgs=%d\n", len, areq->cryptlen, nr_sgs);
                err = -EINVAL;
                goto theend_sgs;
        }

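        /* fill the destination descriptor table from the DMA-mapped destination SGs */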
        len = areq->cryptlen;
        i = 0;
        sg = areq->dst;
        while (i < nr_sgd && sg && len) {
                if (sg_dma_len(sg) == 0)
                        goto sgd_next;
                rctx->t_dst[i].addr = sg_dma_address(sg);
                todo = min(len, sg_dma_len(sg));
                rctx->t_dst[i].len = todo;
                dev_dbg(ce->dev, "%s total=%u SGD(%d %u off=%d) todo=%u\n", __func__,
                        areq->cryptlen, i, rctx->t_dst[i].len, sg->offset, todo);
                len -= todo;
                i++;

sgd_next:
                sg = sg_next(sg);
        }
        if (len > 0) {
                dev_err(ce->dev, "remaining len %d\n", len);
                err = -EINVAL;
                goto theend_sgs;
        }

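        /* build the control packet for the selected mode */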
        switch (algt->mode) {
        case ECB_AES:
                rctx->pctrllen = sizeof(struct pkt_control_ecb);
                ecb = (struct pkt_control_ecb *)ce->pctrl;

                rctx->tqflag = TQ0_TYPE_CTRL;
                rctx->tqflag |= TQ1_CIPHER;
                ecb->control.op_mode = rctx->op_dir;
                ecb->control.cipher_algorithm = ECB_AES;
                ecb->cipher.header_len = 0;
                ecb->cipher.algorithm_len = areq->cryptlen;
                cpu_to_be32_array((__be32 *)ecb->key, (u32 *)op->key, op->keylen / 4);
                rctx->h = &ecb->cipher;

                rctx->tqflag |= TQ4_KEY0;
                rctx->tqflag |= TQ5_KEY4;
                rctx->tqflag |= TQ6_KEY6;
                ecb->control.aesnk = op->keylen / 4;
                break;
        }

        rctx->nr_sgs = nr_sgs;
        rctx->nr_sgd = nr_sgd;
        err = sl3516_ce_run_task(ce, rctx, crypto_tfm_alg_name(areq->base.tfm));

theend_sgs:
        if (areq->src == areq->dst) {
                dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
                             DMA_BIDIRECTIONAL);
        } else {
                dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
                             DMA_TO_DEVICE);
                dma_unmap_sg(ce->dev, areq->dst, sg_nents(areq->dst),
                             DMA_FROM_DEVICE);
        }

theend:

        return err;
}

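/*
 * sl3516_ce_handle_cipher_request - crypto_engine do_one_request callback:
 * run the cipher and finalize the request
 */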
static int sl3516_ce_handle_cipher_request(struct crypto_engine *engine, void *areq)
{
        int err;
        struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);

        err = sl3516_ce_cipher(breq);
        crypto_finalize_skcipher_request(engine, breq, err);

        return 0;
}

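/* sl3516_ce_skdecrypt - queue a decryption request, falling back if needed */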
int sl3516_ce_skdecrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
        struct crypto_engine *engine;

        memset(rctx, 0, sizeof(struct sl3516_ce_cipher_req_ctx));
        rctx->op_dir = CE_DECRYPTION;

        if (sl3516_ce_need_fallback(areq))
                return sl3516_ce_cipher_fallback(areq);

        engine = op->ce->engine;

        return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

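/* sl3516_ce_skencrypt - queue an encryption request, falling back if needed */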
int sl3516_ce_skencrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
        struct crypto_engine *engine;

        memset(rctx, 0, sizeof(struct sl3516_ce_cipher_req_ctx));
        rctx->op_dir = CE_ENCRYPTION;

        if (sl3516_ce_need_fallback(areq))
                return sl3516_ce_cipher_fallback(areq);

        engine = op->ce->engine;

        return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

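/*
 * sl3516_ce_cipher_init - allocate the fallback skcipher, register the engine
 * callbacks and take a runtime PM reference on the device
 */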
int sl3516_ce_cipher_init(struct crypto_tfm *tfm)
{
        struct sl3516_ce_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
        struct sl3516_ce_alg_template *algt;
        const char *name = crypto_tfm_alg_name(tfm);
        struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
        struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);
        int err;

        memset(op, 0, sizeof(struct sl3516_ce_cipher_tfm_ctx));

        algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher);
        op->ce = algt->ce;

        op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(op->fallback_tfm)) {
                dev_err(op->ce->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
                        name, PTR_ERR(op->fallback_tfm));
                return PTR_ERR(op->fallback_tfm);
        }

        sktfm->reqsize = sizeof(struct sl3516_ce_cipher_req_ctx) +
                         crypto_skcipher_reqsize(op->fallback_tfm);

        dev_info(op->ce->dev, "Fallback for %s is %s\n",
                 crypto_tfm_alg_driver_name(&sktfm->base),
                 crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)));

        op->enginectx.op.do_one_request = sl3516_ce_handle_cipher_request;
        op->enginectx.op.prepare_request = NULL;
        op->enginectx.op.unprepare_request = NULL;

        err = pm_runtime_get_sync(op->ce->dev);
        if (err < 0)
                goto error_pm;

        return 0;
error_pm:
        pm_runtime_put_noidle(op->ce->dev);
        crypto_free_skcipher(op->fallback_tfm);
        return err;
}

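/* sl3516_ce_cipher_exit - free the key and fallback skcipher, drop the PM reference */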
void sl3516_ce_cipher_exit(struct crypto_tfm *tfm)
{
        struct sl3516_ce_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);

        kfree_sensitive(op->key);
        crypto_free_skcipher(op->fallback_tfm);
        pm_runtime_put_sync_suspend(op->ce->dev);
}

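/*
 * sl3516_ce_aes_setkey - check the AES key length, keep a DMA-able copy of the
 * key and propagate it to the fallback skcipher
 */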
int sl3516_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
                         unsigned int keylen)
{
        struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sl3516_ce_dev *ce = op->ce;

        switch (keylen) {
        case 128 / 8:
                break;
        case 192 / 8:
                break;
        case 256 / 8:
                break;
        default:
                dev_dbg(ce->dev, "ERROR: Invalid keylen %u\n", keylen);
                return -EINVAL;
        }
        kfree_sensitive(op->key);
        op->keylen = keylen;
        op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
        if (!op->key)
                return -ENOMEM;

        crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

        return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}