linux/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ss-cipher.c - hardware cryptographic offloader for
 * Allwinner A80/A83T SoC
 *
 * Copyright (C) 2016-2019 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256-bit
 * keys in CBC and ECB mode.
 *
 * You can find a link to the datasheet in Documentation/arm/sunxi.rst
 */

#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/skcipher.h>
#include "sun8i-ss.h"

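/*
 * Check whether the SS can handle this request directly. The checks
 * below punt to the software fallback anything the hardware appears
 * unable to cope with: partial 16-byte blocks, more than 8 SG entries
 * per direction, unaligned SG offsets, or source and destination SG
 * lists whose entries do not pair up with equal lengths.
 */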
static bool sun8i_ss_need_fallback(struct skcipher_request *areq)
{
	struct scatterlist *in_sg = areq->src;
	struct scatterlist *out_sg = areq->dst;
	struct scatterlist *sg;

	if (areq->cryptlen == 0 || areq->cryptlen % 16)
		return true;

	if (sg_nents(areq->src) > 8 || sg_nents(areq->dst) > 8)
		return true;

	sg = areq->src;
	while (sg) {
		if ((sg->length % 16) != 0)
			return true;
		if ((sg_dma_len(sg) % 16) != 0)
			return true;
		if (!IS_ALIGNED(sg->offset, 16))
			return true;
		sg = sg_next(sg);
	}
	sg = areq->dst;
	while (sg) {
		if ((sg->length % 16) != 0)
			return true;
		if ((sg_dma_len(sg) % 16) != 0)
			return true;
		if (!IS_ALIGNED(sg->offset, 16))
			return true;
		sg = sg_next(sg);
	}

	/*
	 * The SS needs the same number of SG entries, with matching
	 * lengths, for source and destination.
	 */
	in_sg = areq->src;
	out_sg = areq->dst;
	while (in_sg && out_sg) {
		if (in_sg->length != out_sg->length)
			return true;
		in_sg = sg_next(in_sg);
		out_sg = sg_next(out_sg);
	}
	if (in_sg || out_sg)
		return true;
	return false;
}

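/*
 * Hand the request over to the software fallback skcipher. The
 * fallback request is embedded at the end of our request context, so
 * no allocation happens on this path.
 */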
static int sun8i_ss_cipher_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	int err;

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun8i_ss_alg_template *algt;

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
	algt->stat_fb++;
#endif
	skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (rctx->op_dir & SS_DECRYPTION)
		err = crypto_skcipher_decrypt(&rctx->fallback_req);
	else
		err = crypto_skcipher_encrypt(&rctx->fallback_req);
	return err;
}

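/*
 * Perform one cipher operation on the hardware: DMA-map the key, the
 * IV and both SG lists, fill the task descriptors, run the task on the
 * chosen flow, then unmap everything and propagate the output IV.
 */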
static int sun8i_ss_cipher(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ss_dev *ss = op->ss;
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun8i_ss_alg_template *algt;
	struct scatterlist *sg;
	unsigned int todo, len, offset, ivsize;
	void *backup_iv = NULL;
	int nr_sgs = 0;
	int nr_sgd = 0;
	int err = 0;
	int i;

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);

	dev_dbg(ss->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
		crypto_tfm_alg_name(areq->base.tfm),
		areq->cryptlen,
		rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm),
		op->keylen);

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	algt->stat_req++;
#endif

	rctx->op_mode = ss->variant->op_mode[algt->ss_blockmode];
	rctx->method = ss->variant->alg_cipher[algt->ss_algo_id];
	rctx->keylen = op->keylen;

	rctx->p_key = dma_map_single(ss->dev, op->key, op->keylen, DMA_TO_DEVICE);
	if (dma_mapping_error(ss->dev, rctx->p_key)) {
		dev_err(ss->dev, "Cannot DMA MAP KEY\n");
		err = -EFAULT;
		goto theend;
	}

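	/*
	 * CBC needs a DMA-able copy of the IV. When decrypting, the last
	 * ciphertext block is saved first: it is the output IV that must
	 * land in areq->iv afterwards, and an in-place operation would
	 * overwrite it.
	 */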
	ivsize = crypto_skcipher_ivsize(tfm);
	if (areq->iv && ivsize > 0) {
		rctx->ivlen = ivsize;
		rctx->biv = kzalloc(ivsize, GFP_KERNEL | GFP_DMA);
		if (!rctx->biv) {
			err = -ENOMEM;
			goto theend_key;
		}
		if (rctx->op_dir & SS_DECRYPTION) {
			backup_iv = kzalloc(ivsize, GFP_KERNEL);
			if (!backup_iv) {
				err = -ENOMEM;
				/* do not leak the IV copy allocated above */
				kfree(rctx->biv);
				rctx->biv = NULL;
				goto theend_key;
			}
			offset = areq->cryptlen - ivsize;
			scatterwalk_map_and_copy(backup_iv, areq->src, offset,
						 ivsize, 0);
		}
		memcpy(rctx->biv, areq->iv, ivsize);
		rctx->p_iv = dma_map_single(ss->dev, rctx->biv, rctx->ivlen,
					    DMA_TO_DEVICE);
		if (dma_mapping_error(ss->dev, rctx->p_iv)) {
			dev_err(ss->dev, "Cannot DMA MAP IV\n");
			err = -ENOMEM;
			goto theend_iv;
		}
	}
	if (areq->src == areq->dst) {
		nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src),
				    DMA_BIDIRECTIONAL);
		if (nr_sgs <= 0 || nr_sgs > 8) {
			dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend_iv;
		}
		nr_sgd = nr_sgs;
	} else {
		nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src),
				    DMA_TO_DEVICE);
		if (nr_sgs <= 0 || nr_sgs > 8) {
			dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend_iv;
		}
		nr_sgd = dma_map_sg(ss->dev, areq->dst, sg_nents(areq->dst),
				    DMA_FROM_DEVICE);
		if (nr_sgd <= 0 || nr_sgd > 8) {
			dev_err(ss->dev, "Invalid sg number %d\n", nr_sgd);
			err = -EINVAL;
			goto theend_sgs;
		}
	}

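	/*
	 * Fill the task descriptors. Lengths are expressed in 32-bit
	 * words (hence the division by 4); entries that dma_map_sg()
	 * mapped to a zero length are skipped.
	 */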
	len = areq->cryptlen;
	i = 0;
	sg = areq->src;
	while (i < nr_sgs && sg && len) {
		if (sg_dma_len(sg) == 0)
			goto sgs_next;
		rctx->t_src[i].addr = sg_dma_address(sg);
		todo = min(len, sg_dma_len(sg));
		rctx->t_src[i].len = todo / 4;
		dev_dbg(ss->dev, "%s total=%u SGS(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, rctx->t_src[i].len, sg->offset, todo);
		len -= todo;
		i++;
sgs_next:
		sg = sg_next(sg);
	}
	if (len > 0) {
		dev_err(ss->dev, "remaining len %u\n", len);
		err = -EINVAL;
		goto theend_sgs;
	}

	len = areq->cryptlen;
	i = 0;
	sg = areq->dst;
	while (i < nr_sgd && sg && len) {
		if (sg_dma_len(sg) == 0)
			goto sgd_next;
		rctx->t_dst[i].addr = sg_dma_address(sg);
		todo = min(len, sg_dma_len(sg));
		rctx->t_dst[i].len = todo / 4;
		dev_dbg(ss->dev, "%s total=%u SGD(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, rctx->t_dst[i].len, sg->offset, todo);
		len -= todo;
		i++;
sgd_next:
		sg = sg_next(sg);
	}
	if (len > 0) {
		dev_err(ss->dev, "remaining len %u\n", len);
		err = -EINVAL;
		goto theend_sgs;
	}

	err = sun8i_ss_run_task(ss, rctx, crypto_tfm_alg_name(areq->base.tfm));

theend_sgs:
	if (areq->src == areq->dst) {
		dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
			     DMA_TO_DEVICE);
		dma_unmap_sg(ss->dev, areq->dst, sg_nents(areq->dst),
			     DMA_FROM_DEVICE);
	}

theend_iv:
	if (rctx->p_iv)
		dma_unmap_single(ss->dev, rctx->p_iv, rctx->ivlen,
				 DMA_TO_DEVICE);

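	/*
	 * Give the output IV back to the caller: the saved last
	 * ciphertext block when decrypting, the last block of the
	 * destination when encrypting.
	 */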
	if (areq->iv && ivsize > 0) {
		if (rctx->biv) {
			offset = areq->cryptlen - ivsize;
			if (rctx->op_dir & SS_DECRYPTION) {
				memcpy(areq->iv, backup_iv, ivsize);
				kfree_sensitive(backup_iv);
			} else {
				scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
							 ivsize, 0);
			}
			kfree(rctx->biv);
		}
	}

theend_key:
	dma_unmap_single(ss->dev, rctx->p_key, op->keylen, DMA_TO_DEVICE);

theend:
	return err;
}

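/*
 * crypto_engine callback: process one queued skcipher request and
 * signal its completion to the engine.
 */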
static int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *areq)
{
	int err;
	struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);

	err = sun8i_ss_cipher(breq);
	crypto_finalize_skcipher_request(engine, breq, err);

	return 0;
}

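/*
 * Decrypt entry point: use the software fallback when needed,
 * otherwise pick a flow and queue the request on its engine.
 */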
int sun8i_ss_skdecrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	memset(rctx, 0, sizeof(struct sun8i_cipher_req_ctx));
	rctx->op_dir = SS_DECRYPTION;

	if (sun8i_ss_need_fallback(areq))
		return sun8i_ss_cipher_fallback(areq);

	e = sun8i_ss_get_engine_number(op->ss);
	engine = op->ss->flows[e].engine;
	rctx->flow = e;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

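/*
 * Encrypt entry point: same flow selection and queueing as the
 * decrypt path, with the opposite operation direction.
 */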
int sun8i_ss_skencrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	memset(rctx, 0, sizeof(struct sun8i_cipher_req_ctx));
	rctx->op_dir = SS_ENCRYPTION;

	if (sun8i_ss_need_fallback(areq))
		return sun8i_ss_cipher_fallback(areq);

	e = sun8i_ss_get_engine_number(op->ss);
	engine = op->ss->flows[e].engine;
	rctx->flow = e;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

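/*
 * Per-tfm setup: allocate the software fallback, size the request
 * context so it can carry the fallback request, register the
 * crypto_engine callbacks and take a runtime PM reference that is
 * held for the lifetime of the tfm.
 */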
int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct sun8i_ss_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);
	struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);
	int err;

	memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx));

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
	op->ss = algt->ss;

	op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

	sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx) +
			 crypto_skcipher_reqsize(op->fallback_tfm);

	dev_info(op->ss->dev, "Fallback for %s is %s\n",
		 crypto_tfm_alg_driver_name(&sktfm->base),
		 crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)));

	op->enginectx.op.do_one_request = sun8i_ss_handle_cipher_request;
	op->enginectx.op.prepare_request = NULL;
	op->enginectx.op.unprepare_request = NULL;

	err = pm_runtime_resume_and_get(op->ss->dev);
	if (err < 0) {
		dev_err(op->ss->dev, "pm error %d\n", err);
		goto error_pm;
	}

	return 0;
error_pm:
	crypto_free_skcipher(op->fallback_tfm);
	return err;
}

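/*
 * Per-tfm teardown, mirroring sun8i_ss_cipher_init(): dispose of the
 * key, free the fallback tfm and drop the runtime PM reference.
 */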
void sun8i_ss_cipher_exit(struct crypto_tfm *tfm)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);

	kfree_sensitive(op->key);
	crypto_free_skcipher(op->fallback_tfm);
	pm_runtime_put_sync(op->ss->dev);
}

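/*
 * Set the AES key: keep a DMA-able copy for the hardware and mirror
 * the key into the fallback tfm so both paths stay in sync.
 */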
int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ss_dev *ss = op->ss;

	switch (keylen) {
	case 128 / 8:
	case 192 / 8:
	case 256 / 8:
		break;
	default:
		dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
		return -EINVAL;
	}
	kfree_sensitive(op->key);
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}

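/*
 * Set the DES3-EDE key: same handling as the AES path, with only the
 * 24-byte (3 * DES_KEY_SIZE) length accepted.
 */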
int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ss_dev *ss = op->ss;

	if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
		dev_dbg(ss->dev, "Invalid keylen %u\n", keylen);
		return -EINVAL;
	}

	kfree_sensitive(op->key);
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}