linux/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ss-hash.c - hardware cryptographic offloader for
 * Allwinner A80/A83T SoC
 *
 * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com>
 *
 * This file adds support for MD5 and SHA1/SHA224/SHA256.
 *
 * You can find the datasheet in Documentation/arm/sunxi.rst
 */
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/md5.h>
#include "sun8i-ss.h"

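/*
 * sun8i_ss_hash_crainit() - initialize the per-tfm context: hook the crypto
 * engine callbacks, allocate the software fallback ahash and take a runtime
 * PM reference on the device.
 */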
int sun8i_ss_hash_crainit(struct crypto_tfm *tfm)
{
        struct sun8i_ss_hash_tfm_ctx *op = crypto_tfm_ctx(tfm);
        struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
        struct sun8i_ss_alg_template *algt;
        int err;

        memset(op, 0, sizeof(struct sun8i_ss_hash_tfm_ctx));

        algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
        op->ss = algt->ss;

        op->enginectx.op.do_one_request = sun8i_ss_hash_run;
        op->enginectx.op.prepare_request = NULL;
        op->enginectx.op.unprepare_request = NULL;

        /* FALLBACK */
        op->fallback_tfm = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0,
                                              CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(op->fallback_tfm)) {
                dev_err(algt->ss->dev, "Fallback driver could not be loaded\n");
                return PTR_ERR(op->fallback_tfm);
        }

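        /*
         * Export/import are delegated to the fallback, so advertise at least
         * its statesize.
         */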
        if (algt->alg.hash.halg.statesize < crypto_ahash_statesize(op->fallback_tfm))
                algt->alg.hash.halg.statesize = crypto_ahash_statesize(op->fallback_tfm);

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct sun8i_ss_hash_reqctx) +
                                 crypto_ahash_reqsize(op->fallback_tfm));

        dev_info(op->ss->dev, "Fallback for %s is %s\n",
                 crypto_tfm_alg_driver_name(tfm),
                 crypto_tfm_alg_driver_name(&op->fallback_tfm->base));
        err = pm_runtime_get_sync(op->ss->dev);
        if (err < 0)
                goto error_pm;
        return 0;
error_pm:
        pm_runtime_put_noidle(op->ss->dev);
        crypto_free_ahash(op->fallback_tfm);
        return err;
}

void sun8i_ss_hash_craexit(struct crypto_tfm *tfm)
{
        struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_tfm_ctx(tfm);

        crypto_free_ahash(tfmctx->fallback_tfm);
        pm_runtime_put_sync_suspend(tfmctx->ss->dev);
}

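/*
 * init/update/final/finup/export/import are simply forwarded to the fallback
 * tfm; only one-shot digest() requests (sun8i_ss_hash_digest() below) are
 * offloaded to the hardware.
 */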
int sun8i_ss_hash_init(struct ahash_request *areq)
{
        struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
        struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

        memset(rctx, 0, sizeof(struct sun8i_ss_hash_reqctx));

        ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
        rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

        return crypto_ahash_init(&rctx->fallback_req);
}

int sun8i_ss_hash_export(struct ahash_request *areq, void *out)
{
        struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
        struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
        rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

        return crypto_ahash_export(&rctx->fallback_req, out);
}

int sun8i_ss_hash_import(struct ahash_request *areq, const void *in)
{
        struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
        struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
        rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

        return crypto_ahash_import(&rctx->fallback_req, in);
}

int sun8i_ss_hash_final(struct ahash_request *areq)
{
        struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
        struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
        struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
        struct sun8i_ss_alg_template *algt;
#endif

        ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
        rctx->fallback_req.base.flags = areq->base.flags &
                                        CRYPTO_TFM_REQ_MAY_SLEEP;
        rctx->fallback_req.result = areq->result;

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
        algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
        algt->stat_fb++;
#endif

        return crypto_ahash_final(&rctx->fallback_req);
}

int sun8i_ss_hash_update(struct ahash_request *areq)
{
        struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
        struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
        rctx->fallback_req.base.flags = areq->base.flags &
                                        CRYPTO_TFM_REQ_MAY_SLEEP;
        rctx->fallback_req.nbytes = areq->nbytes;
        rctx->fallback_req.src = areq->src;

        return crypto_ahash_update(&rctx->fallback_req);
}

int sun8i_ss_hash_finup(struct ahash_request *areq)
{
        struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
        struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
        struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
        struct sun8i_ss_alg_template *algt;
#endif

        ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
        rctx->fallback_req.base.flags = areq->base.flags &
                                        CRYPTO_TFM_REQ_MAY_SLEEP;

        rctx->fallback_req.nbytes = areq->nbytes;
        rctx->fallback_req.src = areq->src;
        rctx->fallback_req.result = areq->result;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
        algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
        algt->stat_fb++;
#endif

        return crypto_ahash_finup(&rctx->fallback_req);
}

static int sun8i_ss_hash_digest_fb(struct ahash_request *areq)
{
        struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
        struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
        struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
        struct sun8i_ss_alg_template *algt;
#endif

        ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
        rctx->fallback_req.base.flags = areq->base.flags &
                                        CRYPTO_TFM_REQ_MAY_SLEEP;

        rctx->fallback_req.nbytes = areq->nbytes;
        rctx->fallback_req.src = areq->src;
        rctx->fallback_req.result = areq->result;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
        algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
        algt->stat_fb++;
#endif

        return crypto_ahash_digest(&rctx->fallback_req);
}

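/*
 * sun8i_ss_run_hash_task() - program the SS registers and start the hash
 * task on the chosen flow. Each scatterlist entry is submitted as its own
 * operation; from the second entry onward BIT(17) is set and the previous
 * digest is written to the KEY/IV address registers, which appears to make
 * the engine resume from that intermediate state rather than restart.
 */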
static int sun8i_ss_run_hash_task(struct sun8i_ss_dev *ss,
                                  struct sun8i_ss_hash_reqctx *rctx,
                                  const char *name)
{
        int flow = rctx->flow;
        u32 v = SS_START;
        int i;

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
        ss->flows[flow].stat_req++;
#endif

        /* choose between stream0/stream1 */
        if (flow)
                v |= SS_FLOW1;
        else
                v |= SS_FLOW0;

        v |= rctx->method;

        for (i = 0; i < MAX_SG; i++) {
                if (!rctx->t_dst[i].addr)
                        break;

                mutex_lock(&ss->mlock);
                if (i > 0) {
                        v |= BIT(17);
                        writel(rctx->t_dst[i - 1].addr, ss->base + SS_KEY_ADR_REG);
                        writel(rctx->t_dst[i - 1].addr, ss->base + SS_IV_ADR_REG);
                }

                dev_dbg(ss->dev,
                        "Processing SG %d on flow %d %s ctl=%x %d to %d method=%x src=%x dst=%x\n",
                        i, flow, name, v,
                        rctx->t_src[i].len, rctx->t_dst[i].len,
                        rctx->method, rctx->t_src[i].addr, rctx->t_dst[i].addr);

                writel(rctx->t_src[i].addr, ss->base + SS_SRC_ADR_REG);
                writel(rctx->t_dst[i].addr, ss->base + SS_DST_ADR_REG);
                writel(rctx->t_src[i].len, ss->base + SS_LEN_ADR_REG);
                writel(BIT(0) | BIT(1), ss->base + SS_INT_CTL_REG);

                reinit_completion(&ss->flows[flow].complete);
                ss->flows[flow].status = 0;
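                /* Be sure all data is written before enabling the task */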
                wmb();

                writel(v, ss->base + SS_CTL_REG);
                mutex_unlock(&ss->mlock);
                wait_for_completion_interruptible_timeout(&ss->flows[flow].complete,
                                                          msecs_to_jiffies(2000));
                if (ss->flows[flow].status == 0) {
                        dev_err(ss->dev, "DMA timeout for %s\n", name);
                        return -EFAULT;
                }
        }

        return 0;
}

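/*
 * sun8i_ss_hash_need_fallback() - decide whether a request must go to the
 * software fallback: empty requests, requests needing more SG entries than
 * the task list can hold (one slot is kept free for the padding), and SGs
 * whose length is not a multiple of the 64-byte block size or whose offset
 * is not 32-bit aligned.
 */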
static bool sun8i_ss_hash_need_fallback(struct ahash_request *areq)
{
        struct scatterlist *sg;

        if (areq->nbytes == 0)
                return true;
        /* we need to reserve one SG slot for the padding */
        if (sg_nents(areq->src) > MAX_SG - 1)
                return true;
        sg = areq->src;
        while (sg) {
                /* The SS can only hash full blocks. Since it supports only
                 * MD5, SHA1, SHA224 and SHA256, the block size is always 64.
                 * TODO: handle requests whose last SG is not a multiple of
                 * 64 bytes long; this would require copying the data into a
                 * new SG of size 64.
                 */
                if (sg->length % 64 || !IS_ALIGNED(sg->offset, sizeof(u32)))
                        return true;
                sg = sg_next(sg);
        }
        return false;
}

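/*
 * sun8i_ss_hash_digest() - entry point for one-shot digest requests: either
 * hand the request to the fallback or queue it on a crypto engine flow; the
 * hardware work itself is done later in sun8i_ss_hash_run().
 */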
int sun8i_ss_hash_digest(struct ahash_request *areq)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
        struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
        struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
        struct sun8i_ss_alg_template *algt;
        struct sun8i_ss_dev *ss;
        struct crypto_engine *engine;
        struct scatterlist *sg;
        int nr_sgs, e, i;

        if (sun8i_ss_hash_need_fallback(areq))
                return sun8i_ss_hash_digest_fb(areq);

        nr_sgs = sg_nents(areq->src);
        if (nr_sgs > MAX_SG - 1)
                return sun8i_ss_hash_digest_fb(areq);

        for_each_sg(areq->src, sg, nr_sgs, i) {
                if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
                        return sun8i_ss_hash_digest_fb(areq);
        }

        algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
        ss = algt->ss;

        e = sun8i_ss_get_engine_number(ss);
        rctx->flow = e;
        engine = ss->flows[e].engine;

        return crypto_transfer_hash_request_to_engine(engine, areq);
}

/*
 * sun8i_ss_hash_run - run an ahash request
 * Send the request data to the SS along with an extra SG carrying the padding.
 */
int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
{
        struct ahash_request *areq = container_of(breq, struct ahash_request, base);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
        struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
        struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
        struct sun8i_ss_alg_template *algt;
        struct sun8i_ss_dev *ss;
        struct scatterlist *sg;
        int nr_sgs, err, digestsize;
        unsigned int len;
        u64 fill, min_fill, byte_count;
        void *pad, *result;
        int j, i, todo;
        __be64 *bebits;
        __le64 *lebits;
        dma_addr_t addr_res, addr_pad;
        __le32 *bf;

        algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
        ss = algt->ss;

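        /*
         * SHA224 uses the SHA256 internal state, so the hardware produces a
         * full SHA256-sized digest; only halg.digestsize bytes are copied to
         * areq->result at the end.
         */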
        digestsize = algt->alg.hash.halg.digestsize;
        if (digestsize == SHA224_DIGEST_SIZE)
                digestsize = SHA256_DIGEST_SIZE;

        /* the padding can be up to two blocks */
        pad = kzalloc(algt->alg.hash.halg.base.cra_blocksize * 2, GFP_KERNEL | GFP_DMA);
        if (!pad)
                return -ENOMEM;
        bf = (__le32 *)pad;

        result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
        if (!result) {
                kfree(pad);
                return -ENOMEM;
        }

        for (i = 0; i < MAX_SG; i++) {
                rctx->t_dst[i].addr = 0;
                rctx->t_dst[i].len = 0;
        }

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
        algt->stat_req++;
#endif

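        /* Algorithm value for the CTL register, taken from the variant table */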
        rctx->method = ss->variant->alg_hash[algt->ss_algo_id];

        nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
        if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
                dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs);
                err = -EINVAL;
                goto theend;
        }

        addr_res = dma_map_single(ss->dev, result, digestsize, DMA_FROM_DEVICE);
        if (dma_mapping_error(ss->dev, addr_res)) {
                dev_err(ss->dev, "DMA map dest\n");
                err = -EINVAL;
                goto theend;
        }

        len = areq->nbytes;
        for_each_sg(areq->src, sg, nr_sgs, i) {
                rctx->t_src[i].addr = sg_dma_address(sg);
                todo = min(len, sg_dma_len(sg));
                rctx->t_src[i].len = todo / 4;
                len -= todo;
                rctx->t_dst[i].addr = addr_res;
                rctx->t_dst[i].len = digestsize / 4;
        }
        if (len > 0) {
                dev_err(ss->dev, "remaining len %d\n", len);
                err = -EINVAL;
                goto theend;
        }

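        /*
         * Build the standard MD5/SHA padding: a 0x80 byte, zeroes, then the
         * message length in bits as a 64-bit value (little-endian for MD5,
         * big-endian for SHA1/SHA224/SHA256), sized so that data plus
         * padding is a multiple of 64 bytes.
         */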
        byte_count = areq->nbytes;
        j = 0;
        bf[j++] = cpu_to_le32(0x80);

        fill = 64 - (byte_count % 64);
        min_fill = 3 * sizeof(u32);

        if (fill < min_fill)
                fill += 64;

        j += (fill - min_fill) / sizeof(u32);

        switch (algt->ss_algo_id) {
        case SS_ID_HASH_MD5:
                lebits = (__le64 *)&bf[j];
                *lebits = cpu_to_le64(byte_count << 3);
                j += 2;
                break;
        case SS_ID_HASH_SHA1:
        case SS_ID_HASH_SHA224:
        case SS_ID_HASH_SHA256:
                bebits = (__be64 *)&bf[j];
                *bebits = cpu_to_be64(byte_count << 3);
                j += 2;
                break;
        }

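        /*
         * Map the padding and append it as the last task entry, in the SG
         * slot kept free by sun8i_ss_hash_need_fallback().
         */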
        addr_pad = dma_map_single(ss->dev, pad, j * 4, DMA_TO_DEVICE);
        rctx->t_src[i].addr = addr_pad;
        rctx->t_src[i].len = j;
        rctx->t_dst[i].addr = addr_res;
        rctx->t_dst[i].len = digestsize / 4;
        if (dma_mapping_error(ss->dev, addr_pad)) {
                dev_err(ss->dev, "DMA error on padding SG\n");
                err = -EINVAL;
                goto theend;
        }

        err = sun8i_ss_run_hash_task(ss, rctx, crypto_tfm_alg_name(areq->base.tfm));

        dma_unmap_single(ss->dev, addr_pad, j * 4, DMA_TO_DEVICE);
        dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
                     DMA_TO_DEVICE);
        dma_unmap_single(ss->dev, addr_res, digestsize, DMA_FROM_DEVICE);

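        /* Copy only the requested digest length (e.g. 28 bytes for SHA224) */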
        memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
theend:
        kfree(pad);
        kfree(result);
        crypto_finalize_hash_request(engine, breq, err);
        return 0;
}