linux/drivers/crypto/rockchip/rk3288_crypto_ahash.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Crypto acceleration support for Rockchip RK3288
 *
 * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
 *
 * Author: Zain Wang <zain.wang@rock-chips.com>
 *
 * Some ideas are from marvell/cesa.c and s5p-sss.c driver.
 */
#include "rk3288_crypto.h"

/*
 * The IC cannot hash a zero-length message, so return the
 * precomputed hash of the empty message instead.
 */

static int zero_message_process(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        int rk_digest_size = crypto_ahash_digestsize(tfm);

        switch (rk_digest_size) {
        case SHA1_DIGEST_SIZE:
                memcpy(req->result, sha1_zero_message_hash, rk_digest_size);
                break;
        case SHA256_DIGEST_SIZE:
                memcpy(req->result, sha256_zero_message_hash, rk_digest_size);
                break;
        case MD5_DIGEST_SIZE:
                memcpy(req->result, md5_zero_message_hash, rk_digest_size);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

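/* Forward the engine's completion status to the caller's callback. */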
static void rk_ahash_crypto_complete(struct crypto_async_request *base, int err)
{
        if (base->complete)
                base->complete(base, err);
}

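/*
 * Program the engine for a fresh hash operation: flush the hash unit,
 * clear the digest output registers, unmask the hash RDMA interrupts,
 * select the hash mode and output byte order, configure FIFO byte
 * swapping, and write the total message length. The upper 16 bits of
 * RK_CRYPTO_CTRL appear to act as a write-enable mask for the lower
 * 16 bits, hence the mirrored _SBF(0xffff, 16) in every CTRL write.
 */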
static void rk_ahash_reg_init(struct rk_crypto_info *dev)
{
        struct ahash_request *req = ahash_request_cast(dev->async_req);
        struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
        int reg_status = 0;

        reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) |
                     RK_CRYPTO_HASH_FLUSH | _SBF(0xffff, 16);
        CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status);

        reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL);
        reg_status &= (~RK_CRYPTO_HASH_FLUSH);
        reg_status |= _SBF(0xffff, 16);
        CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status);

        memset_io(dev->reg + RK_CRYPTO_HASH_DOUT_0, 0, 32);

        CRYPTO_WRITE(dev, RK_CRYPTO_INTENA, RK_CRYPTO_HRDMA_ERR_ENA |
                                            RK_CRYPTO_HRDMA_DONE_ENA);

        CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, RK_CRYPTO_HRDMA_ERR_INT |
                                            RK_CRYPTO_HRDMA_DONE_INT);

        CRYPTO_WRITE(dev, RK_CRYPTO_HASH_CTRL, rctx->mode |
                                               RK_CRYPTO_HASH_SWAP_DO);

        CRYPTO_WRITE(dev, RK_CRYPTO_CONF, RK_CRYPTO_BYTESWAP_HRFIFO |
                                          RK_CRYPTO_BYTESWAP_BRFIFO |
                                          RK_CRYPTO_BYTESWAP_BTFIFO);

        CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, dev->total);
}

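/*
 * Incremental hashing (init/update/final/finup) and state
 * export/import are delegated to a software fallback tfm; only
 * one-shot digests (rk_ahash_digest below) use the hardware.
 */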
static int rk_ahash_init(struct ahash_request *req)
{
        struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
        rctx->fallback_req.base.flags = req->base.flags &
                                        CRYPTO_TFM_REQ_MAY_SLEEP;

        return crypto_ahash_init(&rctx->fallback_req);
}

static int rk_ahash_update(struct ahash_request *req)
{
        struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
        rctx->fallback_req.base.flags = req->base.flags &
                                        CRYPTO_TFM_REQ_MAY_SLEEP;
        rctx->fallback_req.nbytes = req->nbytes;
        rctx->fallback_req.src = req->src;

        return crypto_ahash_update(&rctx->fallback_req);
}

static int rk_ahash_final(struct ahash_request *req)
{
        struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
        rctx->fallback_req.base.flags = req->base.flags &
                                        CRYPTO_TFM_REQ_MAY_SLEEP;
        rctx->fallback_req.result = req->result;

        return crypto_ahash_final(&rctx->fallback_req);
}

static int rk_ahash_finup(struct ahash_request *req)
{
        struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
        rctx->fallback_req.base.flags = req->base.flags &
                                        CRYPTO_TFM_REQ_MAY_SLEEP;

        rctx->fallback_req.nbytes = req->nbytes;
        rctx->fallback_req.src = req->src;
        rctx->fallback_req.result = req->result;

        return crypto_ahash_finup(&rctx->fallback_req);
}

static int rk_ahash_import(struct ahash_request *req, const void *in)
{
        struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
        rctx->fallback_req.base.flags = req->base.flags &
                                        CRYPTO_TFM_REQ_MAY_SLEEP;

        return crypto_ahash_import(&rctx->fallback_req, in);
}

static int rk_ahash_export(struct ahash_request *req, void *out)
{
        struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
        rctx->fallback_req.base.flags = req->base.flags &
                                        CRYPTO_TFM_REQ_MAY_SLEEP;

        return crypto_ahash_export(&rctx->fallback_req, out);
}

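/*
 * Hardware entry point: zero-length requests get the precomputed
 * empty-message hash; everything else is queued for the engine.
 */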
static int rk_ahash_digest(struct ahash_request *req)
{
        struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
        struct rk_crypto_info *dev = tctx->dev;

        if (!req->nbytes)
                return zero_message_process(req);
        else
                return dev->enqueue(dev, &req->base);
}

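/*
 * Kick off one DMA transfer. RK_CRYPTO_HRDMAL takes the length in
 * 32-bit words, so the byte count is rounded up with (count + 3) / 4.
 */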
static void crypto_ahash_dma_start(struct rk_crypto_info *dev)
{
        CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, dev->addr_in);
        CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, (dev->count + 3) / 4);
        CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_HASH_START |
                                          (RK_CRYPTO_HASH_START << 16));
}

static int rk_ahash_set_data_start(struct rk_crypto_info *dev)
{
        int err;

        err = dev->load_data(dev, dev->sg_src, NULL);
        if (!err)
                crypto_ahash_dma_start(dev);
        return err;
}

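/*
 * Called from the crypto queue worker for each dequeued request:
 * record the total length and scatterlist walk state, derive the hash
 * mode from the digest size, then program the registers and start the
 * first DMA chunk.
 */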
static int rk_ahash_start(struct rk_crypto_info *dev)
{
        struct ahash_request *req = ahash_request_cast(dev->async_req);
        struct crypto_ahash *tfm;
        struct rk_ahash_rctx *rctx;

        dev->total = req->nbytes;
        dev->left_bytes = req->nbytes;
        dev->aligned = 0;
        dev->align_size = 4;
        dev->sg_dst = NULL;
        dev->sg_src = req->src;
        dev->first = req->src;
        dev->src_nents = sg_nents(req->src);
        rctx = ahash_request_ctx(req);
        rctx->mode = 0;

        tfm = crypto_ahash_reqtfm(req);
        switch (crypto_ahash_digestsize(tfm)) {
        case SHA1_DIGEST_SIZE:
                rctx->mode = RK_CRYPTO_HASH_SHA1;
                break;
        case SHA256_DIGEST_SIZE:
                rctx->mode = RK_CRYPTO_HASH_SHA256;
                break;
        case MD5_DIGEST_SIZE:
                rctx->mode = RK_CRYPTO_HASH_MD5;
                break;
        default:
                return -EINVAL;
        }

        rk_ahash_reg_init(dev);
        return rk_ahash_set_data_start(dev);
}

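/*
 * DMA-done continuation: unmap the chunk just transferred, then either
 * advance to the next scatterlist entry and restart DMA, or, once all
 * data has been fed in, wait for the engine to finish and copy the
 * digest out of the DOUT registers.
 */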
static int rk_ahash_crypto_rx(struct rk_crypto_info *dev)
{
        int err = 0;
        struct ahash_request *req = ahash_request_cast(dev->async_req);
        struct crypto_ahash *tfm;

        dev->unload_data(dev);
        if (dev->left_bytes) {
                if (dev->aligned) {
                        if (sg_is_last(dev->sg_src)) {
                                dev_warn(dev->dev, "[%s:%d], Lack of data\n",
                                         __func__, __LINE__);
                                err = -ENOMEM;
                                goto out_rx;
                        }
                        dev->sg_src = sg_next(dev->sg_src);
                }
                err = rk_ahash_set_data_start(dev);
        } else {
                /*
                 * The engine needs some time to process the data after
                 * the last DMA transfer completes, and the wait scales
                 * with the length of that final chunk, so a fixed delay
                 * cannot be used. Polling every 10us keeps the response
                 * prompt once the hash is ready without burning cycles
                 * in this loop.
                 */
                while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS))
                        udelay(10);

                tfm = crypto_ahash_reqtfm(req);
                memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0,
                              crypto_ahash_digestsize(tfm));
                dev->complete(dev->async_req, 0);
                tasklet_schedule(&dev->queue_task);
        }

out_rx:
        return err;
}

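/*
 * Per-tfm setup: allocate a bounce page for unaligned data, wire up
 * the device callbacks, and create the software fallback tfm whose
 * request context is appended to ours via crypto_ahash_set_reqsize().
 */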
static int rk_cra_hash_init(struct crypto_tfm *tfm)
{
        struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
        struct rk_crypto_tmp *algt;
        struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);

        const char *alg_name = crypto_tfm_alg_name(tfm);

        algt = container_of(alg, struct rk_crypto_tmp, alg.hash);

        tctx->dev = algt->dev;
        tctx->dev->addr_vir = (void *)__get_free_page(GFP_KERNEL);
        if (!tctx->dev->addr_vir) {
                dev_err(tctx->dev->dev, "failed to allocate page for addr_vir\n");
                return -ENOMEM;
        }
        tctx->dev->start = rk_ahash_start;
        tctx->dev->update = rk_ahash_crypto_rx;
        tctx->dev->complete = rk_ahash_crypto_complete;

        /* for fallback */
        tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0,
                                                CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(tctx->fallback_tfm)) {
                dev_err(tctx->dev->dev, "Could not load fallback driver.\n");
                /* don't leak the bounce page on the error path */
                free_page((unsigned long)tctx->dev->addr_vir);
                return PTR_ERR(tctx->fallback_tfm);
        }
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct rk_ahash_rctx) +
                                 crypto_ahash_reqsize(tctx->fallback_tfm));

        return tctx->dev->enable_clk(tctx->dev);
}

static void rk_cra_hash_exit(struct crypto_tfm *tfm)
{
        struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);

        free_page((unsigned long)tctx->dev->addr_vir);
        tctx->dev->disable_clk(tctx->dev);
}

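/*
 * Algorithm templates registered by the driver core. Once registered,
 * the hashes are reached through the generic ahash API. A minimal
 * sketch of a one-shot SHA-1 digest from another kernel module might
 * look like the following (error handling trimmed; "buf" and "len" are
 * placeholders, and whether "sha1" resolves to rk-sha1 depends on this
 * driver's priority of 300 winning the algorithm lookup):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha1", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *	u8 digest[SHA1_DIGEST_SIZE];
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	sg_init_one(&sg, buf, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, &sg, digest, len);
 *	crypto_wait_req(crypto_ahash_digest(req), &wait);
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */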
struct rk_crypto_tmp rk_ahash_sha1 = {
        .type = ALG_TYPE_HASH,
        .alg.hash = {
                .init = rk_ahash_init,
                .update = rk_ahash_update,
                .final = rk_ahash_final,
                .finup = rk_ahash_finup,
                .export = rk_ahash_export,
                .import = rk_ahash_import,
                .digest = rk_ahash_digest,
                .halg = {
                         .digestsize = SHA1_DIGEST_SIZE,
                         .statesize = sizeof(struct sha1_state),
                         .base = {
                                  .cra_name = "sha1",
                                  .cra_driver_name = "rk-sha1",
                                  .cra_priority = 300,
                                  .cra_flags = CRYPTO_ALG_ASYNC |
                                               CRYPTO_ALG_NEED_FALLBACK,
                                  .cra_blocksize = SHA1_BLOCK_SIZE,
                                  .cra_ctxsize = sizeof(struct rk_ahash_ctx),
                                  .cra_alignmask = 3,
                                  .cra_init = rk_cra_hash_init,
                                  .cra_exit = rk_cra_hash_exit,
                                  .cra_module = THIS_MODULE,
                                  }
                         }
        }
};

struct rk_crypto_tmp rk_ahash_sha256 = {
        .type = ALG_TYPE_HASH,
        .alg.hash = {
                .init = rk_ahash_init,
                .update = rk_ahash_update,
                .final = rk_ahash_final,
                .finup = rk_ahash_finup,
                .export = rk_ahash_export,
                .import = rk_ahash_import,
                .digest = rk_ahash_digest,
                .halg = {
                         .digestsize = SHA256_DIGEST_SIZE,
                         .statesize = sizeof(struct sha256_state),
                         .base = {
                                  .cra_name = "sha256",
                                  .cra_driver_name = "rk-sha256",
                                  .cra_priority = 300,
                                  .cra_flags = CRYPTO_ALG_ASYNC |
                                               CRYPTO_ALG_NEED_FALLBACK,
                                  .cra_blocksize = SHA256_BLOCK_SIZE,
                                  .cra_ctxsize = sizeof(struct rk_ahash_ctx),
                                  .cra_alignmask = 3,
                                  .cra_init = rk_cra_hash_init,
                                  .cra_exit = rk_cra_hash_exit,
                                  .cra_module = THIS_MODULE,
                                  }
                         }
        }
};

struct rk_crypto_tmp rk_ahash_md5 = {
        .type = ALG_TYPE_HASH,
        .alg.hash = {
                .init = rk_ahash_init,
                .update = rk_ahash_update,
                .final = rk_ahash_final,
                .finup = rk_ahash_finup,
                .export = rk_ahash_export,
                .import = rk_ahash_import,
                .digest = rk_ahash_digest,
                .halg = {
                         .digestsize = MD5_DIGEST_SIZE,
                         .statesize = sizeof(struct md5_state),
                         .base = {
                                  .cra_name = "md5",
                                  .cra_driver_name = "rk-md5",
                                  .cra_priority = 300,
                                  .cra_flags = CRYPTO_ALG_ASYNC |
                                               CRYPTO_ALG_NEED_FALLBACK,
                                  .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
                                  .cra_ctxsize = sizeof(struct rk_ahash_ctx),
                                  .cra_alignmask = 3,
                                  .cra_init = rk_cra_hash_init,
                                  .cra_exit = rk_cra_hash_exit,
                                  .cra_module = THIS_MODULE,
                                  }
                         }
        }
};