linux/drivers/crypto/amlogic/amlogic-gxl-cipher.c
// SPDX-License-Identifier: GPL-2.0
/*
 * amlogic-gxl-cipher.c - hardware cryptographic offloader for Amlogic GXL SoC
 *
 * Copyright (C) 2018-2019 Corentin LABBE <clabbe@baylibre.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * keysizes in CBC and ECB mode.
 */

#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <crypto/internal/skcipher.h>
#include "amlogic-gxl.h"

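/*
 * Pick the flow (DMA channel) that will handle the next request: a
 * simple atomic round-robin over the MAXFLOW hardware flows.
 */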
static int get_engine_number(struct meson_dev *mc)
{
	return atomic_inc_return(&mc->flow) % MAXFLOW;
}

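/*
 * Decide whether the request must go to the software fallback: the
 * engine requires matching src/dst scatterlist geometry, lengths that
 * are multiples of 16 bytes, 32-bit aligned offsets, and no more than
 * MAXDESC - 3 entries once the key/IV descriptors are accounted for.
 */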
static bool meson_cipher_need_fallback(struct skcipher_request *areq)
{
	struct scatterlist *src_sg = areq->src;
	struct scatterlist *dst_sg = areq->dst;

	if (areq->cryptlen == 0)
		return true;

	if (sg_nents(src_sg) != sg_nents(dst_sg))
		return true;

	/* KEY/IV descriptors use 3 desc */
	if (sg_nents(src_sg) > MAXDESC - 3 || sg_nents(dst_sg) > MAXDESC - 3)
		return true;

	while (src_sg && dst_sg) {
		if ((src_sg->length % 16) != 0)
			return true;
		if ((dst_sg->length % 16) != 0)
			return true;
		if (src_sg->length != dst_sg->length)
			return true;
		if (!IS_ALIGNED(src_sg->offset, sizeof(u32)))
			return true;
		if (!IS_ALIGNED(dst_sg->offset, sizeof(u32)))
			return true;
		src_sg = sg_next(src_sg);
		dst_sg = sg_next(dst_sg);
	}

	return false;
}

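/*
 * Hand the request to the pre-allocated software fallback tfm,
 * preserving the caller's completion callback and flags.
 */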
static int meson_cipher_do_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	int err;
#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct meson_alg_template *algt;

	algt = container_of(alg, struct meson_alg_template, alg.skcipher);
	algt->stat_fb++;
#endif
	skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);

	if (rctx->op_dir == MESON_DECRYPT)
		err = crypto_skcipher_decrypt(&rctx->fallback_req);
	else
		err = crypto_skcipher_encrypt(&rctx->fallback_req);
	return err;
}

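/*
 * Run one request on the hardware: build the descriptor list (key and
 * IV first, then one descriptor per scatterlist entry), start the
 * flow's DMA and wait for completion.
 */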
static int meson_cipher(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct meson_dev *mc = op->mc;
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct meson_alg_template *algt;
	int flow = rctx->flow;
	unsigned int todo, eat, len;
	struct scatterlist *src_sg = areq->src;
	struct scatterlist *dst_sg = areq->dst;
	struct meson_desc *desc;
	int nr_sgs, nr_sgd;
	int i, err = 0;
	unsigned int keyivlen, ivsize, offset, tloffset;
	dma_addr_t phykeyiv;
	void *backup_iv = NULL, *bkeyiv;
	u32 v;

	algt = container_of(alg, struct meson_alg_template, alg.skcipher);

	dev_dbg(mc->dev, "%s %s %u %x IV(%u) key=%u flow=%d\n", __func__,
		crypto_tfm_alg_name(areq->base.tfm),
		areq->cryptlen,
		rctx->op_dir, crypto_skcipher_ivsize(tfm),
		op->keylen, flow);

#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
	algt->stat_req++;
	mc->chanlist[flow].stat_req++;
#endif

	/*
	 * The hardware expects a list of meson_desc structures.
	 * The first two structures store the key, the third stores the IV.
	 */
	bkeyiv = kzalloc(48, GFP_KERNEL | GFP_DMA);
	if (!bkeyiv)
		return -ENOMEM;

	memcpy(bkeyiv, op->key, op->keylen);
	keyivlen = op->keylen;

	ivsize = crypto_skcipher_ivsize(tfm);
	if (areq->iv && ivsize > 0) {
		if (ivsize > areq->cryptlen) {
			dev_err(mc->dev, "invalid ivsize=%d vs len=%d\n", ivsize, areq->cryptlen);
			err = -EINVAL;
			goto theend;
		}
		memcpy(bkeyiv + 32, areq->iv, ivsize);
		keyivlen = 48;
		if (rctx->op_dir == MESON_DECRYPT) {
			backup_iv = kzalloc(ivsize, GFP_KERNEL);
			if (!backup_iv) {
				err = -ENOMEM;
				goto theend;
			}
			offset = areq->cryptlen - ivsize;
			scatterwalk_map_and_copy(backup_iv, areq->src, offset,
						 ivsize, 0);
		}
	}
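	/*
	 * The engine consumes key material in 16-byte blocks, so a
	 * 24-byte AES-192 key is padded up to 32 bytes (the buffer was
	 * zeroed by kzalloc).
	 */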
	if (keyivlen == 24)
		keyivlen = 32;

	phykeyiv = dma_map_single(mc->dev, bkeyiv, keyivlen,
				  DMA_TO_DEVICE);
	err = dma_mapping_error(mc->dev, phykeyiv);
	if (err) {
		dev_err(mc->dev, "Cannot DMA MAP KEY IV\n");
		goto theend;
	}

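	/*
	 * Fill the leading descriptors with the key (and the IV, if
	 * any), 16 bytes per MODE_KEY descriptor.
	 */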
	tloffset = 0;
	eat = 0;
	i = 0;
	while (keyivlen > eat) {
		desc = &mc->chanlist[flow].tl[tloffset];
		memset(desc, 0, sizeof(struct meson_desc));
		todo = min(keyivlen - eat, 16u);
		desc->t_src = cpu_to_le32(phykeyiv + i * 16);
		desc->t_dst = cpu_to_le32(i * 16);
		v = (MODE_KEY << 20) | DESC_OWN | 16;
		desc->t_status = cpu_to_le32(v);

		eat += todo;
		i++;
		tloffset++;
	}

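	/*
	 * Map the payload buffers: a single bidirectional mapping for
	 * in-place requests, separate to-device/from-device mappings
	 * otherwise.
	 */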
	if (areq->src == areq->dst) {
		nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src),
				    DMA_BIDIRECTIONAL);
		/* dma_map_sg() returns 0 on failure, never a negative value */
		if (!nr_sgs) {
			dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs);
			err = -EINVAL;
			goto theend;
		}
		nr_sgd = nr_sgs;
	} else {
		nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src),
				    DMA_TO_DEVICE);
		if (!nr_sgs || nr_sgs > MAXDESC - 3) {
			dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs);
			err = -EINVAL;
			goto theend;
		}
		nr_sgd = dma_map_sg(mc->dev, areq->dst, sg_nents(areq->dst),
				    DMA_FROM_DEVICE);
		if (!nr_sgd || nr_sgd > MAXDESC - 3) {
			dev_err(mc->dev, "Invalid SG count %d\n", nr_sgd);
			err = -EINVAL;
			goto theend;
		}
	}

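	/*
	 * One descriptor per scatterlist entry carries the payload; the
	 * final descriptor is flagged DESC_LAST.
	 */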
	src_sg = areq->src;
	dst_sg = areq->dst;
	len = areq->cryptlen;
	while (src_sg) {
		desc = &mc->chanlist[flow].tl[tloffset];
		memset(desc, 0, sizeof(struct meson_desc));

		desc->t_src = cpu_to_le32(sg_dma_address(src_sg));
		desc->t_dst = cpu_to_le32(sg_dma_address(dst_sg));
		todo = min(len, sg_dma_len(src_sg));
		v = (op->keymode << 20) | DESC_OWN | todo | (algt->blockmode << 26);
		if (rctx->op_dir)
			v |= DESC_ENCRYPTION;
		len -= todo;

		if (!sg_next(src_sg))
			v |= DESC_LAST;
		desc->t_status = cpu_to_le32(v);
		tloffset++;
		src_sg = sg_next(src_sg);
		dst_sg = sg_next(dst_sg);
	}

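	/*
	 * Start the flow's DMA on the descriptor list and wait up to
	 * 500 ms for the completion signalled by the interrupt handler.
	 */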
	reinit_completion(&mc->chanlist[flow].complete);
	mc->chanlist[flow].status = 0;
	writel(mc->chanlist[flow].t_phy | 2, mc->base + (flow << 2));
	wait_for_completion_interruptible_timeout(&mc->chanlist[flow].complete,
						  msecs_to_jiffies(500));
	if (mc->chanlist[flow].status == 0) {
		dev_err(mc->dev, "DMA timeout for flow %d\n", flow);
		err = -EINVAL;
	}

	dma_unmap_single(mc->dev, phykeyiv, keyivlen, DMA_TO_DEVICE);

	if (areq->src == areq->dst) {
		dma_unmap_sg(mc->dev, areq->src, sg_nents(areq->src), DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(mc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
		dma_unmap_sg(mc->dev, areq->dst, sg_nents(areq->dst), DMA_FROM_DEVICE);
	}

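	/*
	 * Propagate the chaining IV: on encryption the last ciphertext
	 * block comes from the destination; on decryption it was backed
	 * up earlier, before an in-place operation could overwrite it.
	 */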
	if (areq->iv && ivsize > 0) {
		if (rctx->op_dir == MESON_DECRYPT) {
			memcpy(areq->iv, backup_iv, ivsize);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst,
						 areq->cryptlen - ivsize,
						 ivsize, 0);
		}
	}
theend:
	kfree_sensitive(bkeyiv);
	kfree_sensitive(backup_iv);

	return err;
}

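/* crypto_engine callback: run the request and report its completion. */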
static int meson_handle_cipher_request(struct crypto_engine *engine,
				       void *areq)
{
	int err;
	struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);

	err = meson_cipher(breq);
	crypto_finalize_skcipher_request(engine, breq, err);

	return 0;
}

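/*
 * skcipher entry points: requests the hardware cannot handle go to the
 * software fallback, the rest are queued on a round-robin flow.
 */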
int meson_skdecrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	rctx->op_dir = MESON_DECRYPT;
	if (meson_cipher_need_fallback(areq))
		return meson_cipher_do_fallback(areq);
	e = get_engine_number(op->mc);
	engine = op->mc->chanlist[e].engine;
	rctx->flow = e;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

int meson_skencrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	rctx->op_dir = MESON_ENCRYPT;
	if (meson_cipher_need_fallback(areq))
		return meson_cipher_do_fallback(areq);
	e = get_engine_number(op->mc);
	engine = op->mc->chanlist[e].engine;
	rctx->flow = e;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

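/*
 * Allocate the software fallback at tfm init time and size the request
 * context so it can hold the fallback request as well.
 */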
int meson_cipher_init(struct crypto_tfm *tfm)
{
	struct meson_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct meson_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);
	struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);

	memset(op, 0, sizeof(struct meson_cipher_tfm_ctx));

	algt = container_of(alg, struct meson_alg_template, alg.skcipher);
	op->mc = algt->mc;

	op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->mc->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

	sktfm->reqsize = sizeof(struct meson_cipher_req_ctx) +
			 crypto_skcipher_reqsize(op->fallback_tfm);

	op->enginectx.op.do_one_request = meson_handle_cipher_request;
	op->enginectx.op.prepare_request = NULL;
	op->enginectx.op.unprepare_request = NULL;

	return 0;
}

void meson_cipher_exit(struct crypto_tfm *tfm)
{
	struct meson_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);

	kfree_sensitive(op->key);
	crypto_free_skcipher(op->fallback_tfm);
}

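/*
 * Remember the key (in DMA-able memory, since it is later copied into
 * the DMA-mapped key/IV buffer) and mirror it into the fallback tfm.
 */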
int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
		     unsigned int keylen)
{
	struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct meson_dev *mc = op->mc;

	switch (keylen) {
	case 128 / 8:
		op->keymode = MODE_AES_128;
		break;
	case 192 / 8:
		op->keymode = MODE_AES_192;
		break;
	case 256 / 8:
		op->keymode = MODE_AES_256;
		break;
	default:
		dev_dbg(mc->dev, "ERROR: Invalid keylen %u\n", keylen);
		return -EINVAL;
	}
	kfree_sensitive(op->key);
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}