linux/drivers/crypto/hisilicon/hpre/hpre_crypto.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <crypto/akcipher.h>
#include <crypto/dh.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/kpp.h>
#include <crypto/internal/rsa.h>
#include <crypto/kpp.h>
#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <linux/module.h>
#include <linux/time.h>
#include "hpre.h"

struct hpre_ctx;

#define HPRE_CRYPTO_ALG_PRI     1000
#define HPRE_ALIGN_SZ           64
#define HPRE_BITS_2_BYTES_SHIFT 3
#define HPRE_RSA_512BITS_KSZ    64
#define HPRE_RSA_1536BITS_KSZ   192
#define HPRE_CRT_PRMS           5
#define HPRE_CRT_Q              2
#define HPRE_CRT_P              3
#define HPRE_CRT_INV            4
#define HPRE_DH_G_FLAG          0x02
#define HPRE_TRY_SEND_TIMES     100
#define HPRE_INVLD_REQ_ID       (-1)
#define HPRE_DEV(ctx)           (&((ctx)->qp->qm->pdev->dev))

#define HPRE_SQE_ALG_BITS       5
#define HPRE_SQE_DONE_SHIFT     30
#define HPRE_DH_MAX_P_SZ        512

#define HPRE_DFX_SEC_TO_US      1000000
#define HPRE_DFX_US_TO_NS       1000

typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);

struct hpre_rsa_ctx {
        /* low address: e--->n */
        char *pubkey;
        dma_addr_t dma_pubkey;

        /* low address: d--->n */
        char *prikey;
        dma_addr_t dma_prikey;

        /* low address: dq->dp->q->p->qinv */
        char *crt_prikey;
        dma_addr_t dma_crt_prikey;

        struct crypto_akcipher *soft_tfm;
};

struct hpre_dh_ctx {
        /*
         * If base is g we compute the public key
         *      ya = g^xa mod p; [RFC2631 sec 2.1.1]
         * else if base is the counterpart public key we
         * compute the shared secret
         *      ZZ = yb^xa mod p; [RFC2631 sec 2.1.1]
         */
        char *xa_p; /* low address: xa--->p, please refer to Hisilicon HPRE UM */
        dma_addr_t dma_xa_p;

        char *g; /* m */
        dma_addr_t dma_g;
};

struct hpre_ctx {
        struct hisi_qp *qp;
        struct hpre_asym_request **req_list;
        struct hpre *hpre;
        spinlock_t req_lock;
        unsigned int key_sz;
        bool crt_g2_mode;
        struct idr req_idr;
        union {
                struct hpre_rsa_ctx rsa;
                struct hpre_dh_ctx dh;
        };
};

struct hpre_asym_request {
        char *src;
        char *dst;
        struct hpre_sqe req;
        struct hpre_ctx *ctx;
        union {
                struct akcipher_request *rsa;
                struct kpp_request *dh;
        } areq;
        int err;
        int req_id;
        hpre_cb cb;
        struct timespec64 req_time;
};

static DEFINE_MUTEX(hpre_alg_lock);
static unsigned int hpre_active_devs;

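/*
 * Allocate a per-context request id from the IDR, bounded by the queue
 * depth so the id can double as the hardware sqe tag.
 */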
static int hpre_alloc_req_id(struct hpre_ctx *ctx)
{
        unsigned long flags;
        int id;

        spin_lock_irqsave(&ctx->req_lock, flags);
        id = idr_alloc(&ctx->req_idr, NULL, 0, QM_Q_DEPTH, GFP_ATOMIC);
        spin_unlock_irqrestore(&ctx->req_lock, flags);

        return id;
}

static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id)
{
        unsigned long flags;

        spin_lock_irqsave(&ctx->req_lock, flags);
        idr_remove(&ctx->req_idr, req_id);
        spin_unlock_irqrestore(&ctx->req_lock, flags);
}

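/*
 * Register a request in the context's req_list and, when the overtime
 * threshold debugfs counter is set, timestamp it so the completion path
 * can account requests that exceed the threshold.
 */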
static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
{
        struct hpre_ctx *ctx;
        struct hpre_dfx *dfx;
        int id;

        ctx = hpre_req->ctx;
        id = hpre_alloc_req_id(ctx);
        if (unlikely(id < 0))
                return -EINVAL;

        ctx->req_list[id] = hpre_req;
        hpre_req->req_id = id;

        dfx = ctx->hpre->debug.dfx;
        if (atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value))
                ktime_get_ts64(&hpre_req->req_time);

        return id;
}

static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
{
        struct hpre_ctx *ctx = hpre_req->ctx;
        int id = hpre_req->req_id;

        if (hpre_req->req_id >= 0) {
                hpre_req->req_id = HPRE_INVLD_REQ_ID;
                ctx->req_list[id] = NULL;
                hpre_free_req_id(ctx, id);
        }
}

static struct hisi_qp *hpre_get_qp_and_start(void)
{
        struct hisi_qp *qp;
        int ret;

        qp = hpre_create_qp();
        if (!qp) {
                pr_err("Can not create hpre qp!\n");
                return ERR_PTR(-ENODEV);
        }

        ret = hisi_qm_start_qp(qp, 0);
        if (ret < 0) {
                /* log before freeing: qp must not be touched afterwards */
                pci_err(qp->qm->pdev, "Can not start qp!\n");
                hisi_qm_free_qps(&qp, 1);
                return ERR_PTR(-EINVAL);
        }

        return qp;
}

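/*
 * Fast path: the scatterlist is a single key-sized entry, so its backing
 * buffer can be DMA-mapped directly without a bounce buffer.
 */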
static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
                                  struct scatterlist *data, unsigned int len,
                                  int is_src, dma_addr_t *tmp)
{
        struct hpre_ctx *ctx = hpre_req->ctx;
        struct device *dev = HPRE_DEV(ctx);
        enum dma_data_direction dma_dir;

        if (is_src) {
                hpre_req->src = NULL;
                dma_dir = DMA_TO_DEVICE;
        } else {
                hpre_req->dst = NULL;
                dma_dir = DMA_FROM_DEVICE;
        }
        *tmp = dma_map_single(dev, sg_virt(data), len, dma_dir);
        if (unlikely(dma_mapping_error(dev, *tmp))) {
                dev_err(dev, "dma map data err!\n");
                return -ENOMEM;
        }

        return 0;
}

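/*
 * Slow path: bounce the data through a key-sized coherent buffer.  A
 * source shorter than the key size is copied in right-aligned, i.e.
 * zero-padded at the low addresses.
 */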
static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
                                struct scatterlist *data, unsigned int len,
                                int is_src, dma_addr_t *tmp)
{
        struct hpre_ctx *ctx = hpre_req->ctx;
        struct device *dev = HPRE_DEV(ctx);
        void *ptr;
        int shift;

        shift = ctx->key_sz - len;
        if (unlikely(shift < 0))
                return -EINVAL;

        ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_KERNEL);
        if (unlikely(!ptr))
                return -ENOMEM;

        if (is_src) {
                scatterwalk_map_and_copy(ptr + shift, data, 0, len, 0);
                hpre_req->src = ptr;
        } else {
                hpre_req->dst = ptr;
        }

        return 0;
}

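/*
 * Pick the mapping strategy for one operand and record the resulting DMA
 * address in the sqe: direct mapping when the scatterlist is already a
 * single key-sized entry, bounce buffer otherwise.
 */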
static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
                             struct scatterlist *data, unsigned int len,
                             int is_src, int is_dh)
{
        struct hpre_sqe *msg = &hpre_req->req;
        struct hpre_ctx *ctx = hpre_req->ctx;
        dma_addr_t tmp = 0;
        int ret;

        /* when the data is a DH source operand, it must be reformatted */
        if ((sg_is_last(data) && len == ctx->key_sz) &&
            ((is_dh && !is_src) || !is_dh))
                ret = hpre_get_data_dma_addr(hpre_req, data, len, is_src, &tmp);
        else
                ret = hpre_prepare_dma_buf(hpre_req, data, len,
                                           is_src, &tmp);
        if (unlikely(ret))
                return ret;

        if (is_src)
                msg->in = cpu_to_le64(tmp);
        else
                msg->out = cpu_to_le64(tmp);

        return 0;
}

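/*
 * Undo hpre_hw_data_init() for both operands: copy the result back to the
 * destination scatterlist when a bounce buffer was used, then free the
 * buffer or unmap the direct DMA mapping.
 */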
static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
                                 struct hpre_asym_request *req,
                                 struct scatterlist *dst,
                                 struct scatterlist *src)
{
        struct device *dev = HPRE_DEV(ctx);
        struct hpre_sqe *sqe = &req->req;
        dma_addr_t tmp;

        tmp = le64_to_cpu(sqe->in);
        if (unlikely(!tmp))
                return;

        if (src) {
                if (req->src)
                        dma_free_coherent(dev, ctx->key_sz, req->src, tmp);
                else
                        dma_unmap_single(dev, tmp, ctx->key_sz, DMA_TO_DEVICE);
        }

        tmp = le64_to_cpu(sqe->out);
        if (unlikely(!tmp))
                return;

        if (req->dst) {
                if (dst)
                        scatterwalk_map_and_copy(req->dst, dst, 0,
                                                 ctx->key_sz, 1);
                dma_free_coherent(dev, ctx->key_sz, req->dst, tmp);
        } else {
                dma_unmap_single(dev, tmp, ctx->key_sz, DMA_FROM_DEVICE);
        }
}

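/*
 * Common completion parsing: look the request up by its sqe tag, detach it
 * from the context, and translate the hardware done/error bits in dw0 into
 * 0 or -EINVAL.
 */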
static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
                                void **kreq)
{
        struct hpre_asym_request *req;
        int err, id, done;

#define HPRE_NO_HW_ERR          0
#define HPRE_HW_TASK_DONE       3
#define HPRE_HW_ERR_MASK        0x7ff
#define HPRE_SQE_DONE_MASK      0x3
        id = (int)le16_to_cpu(sqe->tag);
        req = ctx->req_list[id];
        hpre_rm_req_from_ctx(req);
        *kreq = req;

        err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) &
                HPRE_HW_ERR_MASK;

        done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &
                HPRE_SQE_DONE_MASK;

        if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE))
                return 0;

        return -EINVAL;
}

static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
{
        struct hpre *hpre;

        if (!ctx || !qp || qlen < 0)
                return -EINVAL;

        spin_lock_init(&ctx->req_lock);
        ctx->qp = qp;

        hpre = container_of(ctx->qp->qm, struct hpre, qm);
        ctx->hpre = hpre;
        ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);
        if (!ctx->req_list)
                return -ENOMEM;
        ctx->key_sz = 0;
        ctx->crt_g2_mode = false;
        idr_init(&ctx->req_idr);

        return 0;
}

static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
{
        if (is_clear_all) {
                idr_destroy(&ctx->req_idr);
                kfree(ctx->req_list);
                hisi_qm_free_qps(&ctx->qp, 1);
        }

        ctx->crt_g2_mode = false;
        ctx->key_sz = 0;
}

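/*
 * Compare the request's round trip time, in microseconds, against the
 * debugfs overtime threshold.
 */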
static bool hpre_is_bd_timeout(struct hpre_asym_request *req,
                               u64 overtime_thrhld)
{
        struct timespec64 reply_time;
        u64 time_use_us;

        ktime_get_ts64(&reply_time);
        time_use_us = (reply_time.tv_sec - req->req_time.tv_sec) *
                HPRE_DFX_SEC_TO_US +
                (reply_time.tv_nsec - req->req_time.tv_nsec) /
                HPRE_DFX_US_TO_NS;

        return time_use_us > overtime_thrhld;
}

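/*
 * Completion callbacks: release the request's DMA resources, complete the
 * crypto API request with the translated hardware status, and bump the
 * receive counter.
 */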
static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp)
{
        struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
        struct hpre_asym_request *req;
        struct kpp_request *areq;
        u64 overtime_thrhld;
        int ret;

        ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
        areq = req->areq.dh;
        areq->dst_len = ctx->key_sz;

        overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
        if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
                atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);

        hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
        kpp_request_complete(areq, ret);
        atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}

static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp)
{
        struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
        struct hpre_asym_request *req;
        struct akcipher_request *areq;
        u64 overtime_thrhld;
        int ret;

        ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);

        overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
        if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
                atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);

        areq = req->areq.rsa;
        areq->dst_len = ctx->key_sz;
        hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
        akcipher_request_complete(areq, ret);
        atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}

static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
{
        struct hpre_ctx *ctx = qp->qp_ctx;
        struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
        struct hpre_sqe *sqe = resp;
        struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)];

        if (unlikely(!req)) {
                atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value);
                return;
        }

        req->cb(ctx, resp);
}

static int hpre_ctx_init(struct hpre_ctx *ctx)
{
        struct hisi_qp *qp;

        qp = hpre_get_qp_and_start();
        if (IS_ERR(qp))
                return PTR_ERR(qp);

        qp->qp_ctx = ctx;
        qp->req_cb = hpre_alg_cb;

        return hpre_ctx_set(ctx, qp, QM_Q_DEPTH);
}

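/*
 * Fill in the fields of the sqe that RSA and DH share: the done bit, the
 * task length derived from the key size, and a freshly allocated request
 * tag.  The caller sets the algorithm bits afterwards.
 */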
static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
{
        struct hpre_asym_request *h_req;
        struct hpre_sqe *msg;
        int req_id;
        void *tmp;

        if (is_rsa) {
                struct akcipher_request *akreq = req;

                if (akreq->dst_len < ctx->key_sz) {
                        akreq->dst_len = ctx->key_sz;
                        return -EOVERFLOW;
                }

                tmp = akcipher_request_ctx(akreq);
                h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
                h_req->cb = hpre_rsa_cb;
                h_req->areq.rsa = akreq;
                msg = &h_req->req;
                memset(msg, 0, sizeof(*msg));
        } else {
                struct kpp_request *kreq = req;

                if (kreq->dst_len < ctx->key_sz) {
                        kreq->dst_len = ctx->key_sz;
                        return -EOVERFLOW;
                }

                tmp = kpp_request_ctx(kreq);
                h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
                h_req->cb = hpre_dh_cb;
                h_req->areq.dh = kreq;
                msg = &h_req->req;
                memset(msg, 0, sizeof(*msg));
                msg->key = cpu_to_le64((u64)ctx->dh.dma_xa_p);
        }

        msg->dw0 |= cpu_to_le32(0x1 << HPRE_SQE_DONE_SHIFT);
        msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
        h_req->ctx = ctx;

        req_id = hpre_add_req_to_ctx(h_req);
        if (req_id < 0)
                return -EBUSY;

        msg->tag = cpu_to_le16((u16)req_id);

        return 0;
}

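/*
 * Queue the sqe, retrying up to HPRE_TRY_SEND_TIMES while the queue
 * reports -EBUSY; every attempt and every busy bounce is counted in the
 * debugfs statistics.
 */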
static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
{
        struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
        int ctr = 0;
        int ret;

        do {
                atomic64_inc(&dfx[HPRE_SEND_CNT].value);
                ret = hisi_qp_send(ctx->qp, msg);
                if (ret != -EBUSY)
                        break;
                atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value);
        } while (ctr++ < HPRE_TRY_SEND_TIMES);

        if (likely(!ret))
                return ret;

        if (ret != -EBUSY)
                atomic64_inc(&dfx[HPRE_SEND_FAIL_CNT].value);

        return ret;
}

#ifdef CONFIG_CRYPTO_DH
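/*
 * Single handler for both KPP operations: with req->src the base is the
 * peer's public key (shared secret computation); without it the base is g
 * (public key generation), using the dedicated g == 2 algorithm when
 * crt_g2_mode is set.
 */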
static int hpre_dh_compute_value(struct kpp_request *req)
{
        struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
        struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
        void *tmp = kpp_request_ctx(req);
        struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
        struct hpre_sqe *msg = &hpre_req->req;
        int ret;

        ret = hpre_msg_request_set(ctx, req, false);
        if (unlikely(ret))
                return ret;

        if (req->src) {
                ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1);
                if (unlikely(ret))
                        goto clear_all;
        }

        ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1);
        if (unlikely(ret))
                goto clear_all;

        if (ctx->crt_g2_mode && !req->src)
                msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2);
        else
                msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);

        /* success */
        ret = hpre_send(ctx, msg);
        if (likely(!ret))
                return -EINPROGRESS;

clear_all:
        hpre_rm_req_from_ctx(hpre_req);
        hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

        return ret;
}

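/*
 * The hardware only supports the well-known MODP group sizes (RFC 2409
 * groups 1 and 2, RFC 3526 groups 5 and 14-16).
 */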
static int hpre_is_dh_params_length_valid(unsigned int key_sz)
{
#define _HPRE_DH_GRP1           768
#define _HPRE_DH_GRP2           1024
#define _HPRE_DH_GRP5           1536
#define _HPRE_DH_GRP14          2048
#define _HPRE_DH_GRP15          3072
#define _HPRE_DH_GRP16          4096
        switch (key_sz) {
        case _HPRE_DH_GRP1:
        case _HPRE_DH_GRP2:
        case _HPRE_DH_GRP5:
        case _HPRE_DH_GRP14:
        case _HPRE_DH_GRP15:
        case _HPRE_DH_GRP16:
                return 0;
        }

        return -EINVAL;
}

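/*
 * Store p (and g, unless g == 2) in coherent DMA memory.  xa_p is a
 * double-sized buffer holding the private key xa at the low addresses
 * and p at the high ones.
 */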
static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
{
        struct device *dev = HPRE_DEV(ctx);
        unsigned int sz;

        if (params->p_size > HPRE_DH_MAX_P_SZ)
                return -EINVAL;

        if (hpre_is_dh_params_length_valid(params->p_size <<
                                           HPRE_BITS_2_BYTES_SHIFT))
                return -EINVAL;

        sz = ctx->key_sz = params->p_size;
        ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1,
                                          &ctx->dh.dma_xa_p, GFP_KERNEL);
        if (!ctx->dh.xa_p)
                return -ENOMEM;

        memcpy(ctx->dh.xa_p + sz, params->p, sz);

        /* If g equals 2 don't copy it */
        if (params->g_size == 1 && *(char *)params->g == HPRE_DH_G_FLAG) {
                ctx->crt_g2_mode = true;
                return 0;
        }

        ctx->dh.g = dma_alloc_coherent(dev, sz, &ctx->dh.dma_g, GFP_KERNEL);
        if (!ctx->dh.g) {
                dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
                                  ctx->dh.dma_xa_p);
                ctx->dh.xa_p = NULL;
                return -ENOMEM;
        }

        memcpy(ctx->dh.g + (sz - params->g_size), params->g, params->g_size);

        return 0;
}

static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
        struct device *dev = HPRE_DEV(ctx);
        unsigned int sz = ctx->key_sz;

        if (is_clear_all)
                hisi_qm_stop_qp(ctx->qp);

        if (ctx->dh.g) {
                dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g);
                ctx->dh.g = NULL;
        }

        if (ctx->dh.xa_p) {
                memzero_explicit(ctx->dh.xa_p, sz);
                dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
                                  ctx->dh.dma_xa_p);
                ctx->dh.xa_p = NULL;
        }

        hpre_ctx_clear(ctx, is_clear_all);
}

static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
                              unsigned int len)
{
        struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
        struct dh params;
        int ret;

        if (crypto_dh_decode_key(buf, len, &params) < 0)
                return -EINVAL;

        /* Free old secret if any */
        hpre_dh_clear_ctx(ctx, false);

        ret = hpre_dh_set_params(ctx, &params);
        if (ret < 0)
                goto err_clear_ctx;

        memcpy(ctx->dh.xa_p + (ctx->key_sz - params.key_size), params.key,
               params.key_size);

        return 0;

err_clear_ctx:
        hpre_dh_clear_ctx(ctx, false);
        return ret;
}

static unsigned int hpre_dh_max_size(struct crypto_kpp *tfm)
{
        struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

        return ctx->key_sz;
}

static int hpre_dh_init_tfm(struct crypto_kpp *tfm)
{
        struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

        return hpre_ctx_init(ctx);
}

static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
{
        struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

        hpre_dh_clear_ctx(ctx, true);
}
#endif

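/*
 * Advance past leading zero bytes of a big-endian MPI so only the
 * significant bytes are considered when sizing and copying keys.
 */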
static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len)
{
        /* check *len first so an empty buffer is never dereferenced */
        while (*len && !**ptr) {
                (*ptr)++;
                (*len)--;
        }
}

static bool hpre_rsa_key_size_is_support(unsigned int len)
{
        unsigned int bits = len << HPRE_BITS_2_BYTES_SHIFT;

#define _RSA_1024BITS_KEY_WDTH          1024
#define _RSA_2048BITS_KEY_WDTH          2048
#define _RSA_3072BITS_KEY_WDTH          3072
#define _RSA_4096BITS_KEY_WDTH          4096

        switch (bits) {
        case _RSA_1024BITS_KEY_WDTH:
        case _RSA_2048BITS_KEY_WDTH:
        case _RSA_3072BITS_KEY_WDTH:
        case _RSA_4096BITS_KEY_WDTH:
                return true;
        default:
                return false;
        }
}

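/*
 * For 512-bit and 1536-bit keys the request is bounced to the software
 * "rsa-generic" fallback tfm; other sizes rejected by
 * hpre_rsa_key_size_is_support() fail with -EINVAL since no hardware
 * key was set up.
 */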
static int hpre_rsa_enc(struct akcipher_request *req)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
        void *tmp = akcipher_request_ctx(req);
        struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
        struct hpre_sqe *msg = &hpre_req->req;
        int ret;

        /* For 512-bit and 1536-bit keys, use the soft tfm instead */
        if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
            ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
                akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
                ret = crypto_akcipher_encrypt(req);
                akcipher_request_set_tfm(req, tfm);
                return ret;
        }

        if (unlikely(!ctx->rsa.pubkey))
                return -EINVAL;

        ret = hpre_msg_request_set(ctx, req, true);
        if (unlikely(ret))
                return ret;

        msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT);
        msg->key = cpu_to_le64((u64)ctx->rsa.dma_pubkey);

        ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
        if (unlikely(ret))
                goto clear_all;

        ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
        if (unlikely(ret))
                goto clear_all;

        /* success */
        ret = hpre_send(ctx, msg);
        if (likely(!ret))
                return -EINPROGRESS;

clear_all:
        hpre_rm_req_from_ctx(hpre_req);
        hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

        return ret;
}

static int hpre_rsa_dec(struct akcipher_request *req)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
        void *tmp = akcipher_request_ctx(req);
        struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
        struct hpre_sqe *msg = &hpre_req->req;
        int ret;

        /* For 512-bit and 1536-bit keys, use the soft tfm instead */
        if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
            ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
                akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
                ret = crypto_akcipher_decrypt(req);
                akcipher_request_set_tfm(req, tfm);
                return ret;
        }

        if (unlikely(!ctx->rsa.prikey))
                return -EINVAL;

        ret = hpre_msg_request_set(ctx, req, true);
        if (unlikely(ret))
                return ret;

        /* crt_g2_mode doubles as the "CRT private key present" flag here */
        if (ctx->crt_g2_mode) {
                msg->key = cpu_to_le64((u64)ctx->rsa.dma_crt_prikey);
                msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
                                       HPRE_ALG_NC_CRT);
        } else {
                msg->key = cpu_to_le64((u64)ctx->rsa.dma_prikey);
                msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
                                       HPRE_ALG_NC_NCRT);
        }

        ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
        if (unlikely(ret))
                goto clear_all;

        ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
        if (unlikely(ret))
                goto clear_all;

        /* success */
        ret = hpre_send(ctx, msg);
        if (likely(!ret))
                return -EINPROGRESS;

clear_all:
        hpre_rm_req_from_ctx(hpre_req);
        hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

        return ret;
}

static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value,
                          size_t vlen, bool private)
{
        const char *ptr = value;

        hpre_rsa_drop_leading_zeros(&ptr, &vlen);

        ctx->key_sz = vlen;

        /* if an invalid key size is provided, fall back to the software tfm */
        if (!hpre_rsa_key_size_is_support(ctx->key_sz))
                return 0;

        ctx->rsa.pubkey = dma_alloc_coherent(HPRE_DEV(ctx), vlen << 1,
                                             &ctx->rsa.dma_pubkey,
                                             GFP_KERNEL);
        if (!ctx->rsa.pubkey)
                return -ENOMEM;

        if (private) {
                ctx->rsa.prikey = dma_alloc_coherent(HPRE_DEV(ctx), vlen << 1,
                                                     &ctx->rsa.dma_prikey,
                                                     GFP_KERNEL);
                if (!ctx->rsa.prikey) {
                        dma_free_coherent(HPRE_DEV(ctx), vlen << 1,
                                          ctx->rsa.pubkey,
                                          ctx->rsa.dma_pubkey);
                        ctx->rsa.pubkey = NULL;
                        return -ENOMEM;
                }
                memcpy(ctx->rsa.prikey + vlen, ptr, vlen);
        }
        memcpy(ctx->rsa.pubkey + vlen, ptr, vlen);

        /* Use the HPRE hardware to do RSA */
        return 1;
}

static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value,
                          size_t vlen)
{
        const char *ptr = value;

        hpre_rsa_drop_leading_zeros(&ptr, &vlen);

        if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
                return -EINVAL;

        memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen);

        return 0;
}

static int hpre_rsa_set_d(struct hpre_ctx *ctx, const char *value,
                          size_t vlen)
{
        const char *ptr = value;

        hpre_rsa_drop_leading_zeros(&ptr, &vlen);

        if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
                return -EINVAL;

        memcpy(ctx->rsa.prikey + ctx->key_sz - vlen, ptr, vlen);

        return 0;
}

static int hpre_crt_para_get(char *para, size_t para_sz,
                             const char *raw, size_t raw_sz)
{
        const char *ptr = raw;
        size_t len = raw_sz;

        hpre_rsa_drop_leading_zeros(&ptr, &len);
        if (!len || len > para_sz)
                return -EINVAL;

        memcpy(para + para_sz - len, ptr, len);

        return 0;
}

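/*
 * Pack the five CRT parameters into one coherent buffer of half-key-sized
 * slots, in the order dq | dp | q | p | qinv (see the layout comment on
 * struct hpre_rsa_ctx).
 */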
static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
{
        unsigned int hlf_ksz = ctx->key_sz >> 1;
        struct device *dev = HPRE_DEV(ctx);
        u64 offset;
        int ret;

        ctx->rsa.crt_prikey = dma_alloc_coherent(dev, hlf_ksz * HPRE_CRT_PRMS,
                                                 &ctx->rsa.dma_crt_prikey,
                                                 GFP_KERNEL);
        if (!ctx->rsa.crt_prikey)
                return -ENOMEM;

        ret = hpre_crt_para_get(ctx->rsa.crt_prikey, hlf_ksz,
                                rsa_key->dq, rsa_key->dq_sz);
        if (ret)
                goto free_key;

        offset = hlf_ksz;
        ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
                                rsa_key->dp, rsa_key->dp_sz);
        if (ret)
                goto free_key;

        offset = hlf_ksz * HPRE_CRT_Q;
        ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
                                rsa_key->q, rsa_key->q_sz);
        if (ret)
                goto free_key;

        offset = hlf_ksz * HPRE_CRT_P;
        ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
                                rsa_key->p, rsa_key->p_sz);
        if (ret)
                goto free_key;

        offset = hlf_ksz * HPRE_CRT_INV;
        ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
                                rsa_key->qinv, rsa_key->qinv_sz);
        if (ret)
                goto free_key;

        ctx->crt_g2_mode = true;

        return 0;

free_key:
        offset = hlf_ksz * HPRE_CRT_PRMS;
        memzero_explicit(ctx->rsa.crt_prikey, offset);
        dma_free_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey,
                          ctx->rsa.dma_crt_prikey);
        ctx->rsa.crt_prikey = NULL;
        ctx->crt_g2_mode = false;

        return ret;
}

/* If is_clear_all is set, all the resources of the QP will be cleaned. */
static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
        unsigned int half_key_sz = ctx->key_sz >> 1;
        struct device *dev = HPRE_DEV(ctx);

        if (is_clear_all)
                hisi_qm_stop_qp(ctx->qp);

        if (ctx->rsa.pubkey) {
                dma_free_coherent(dev, ctx->key_sz << 1,
                                  ctx->rsa.pubkey, ctx->rsa.dma_pubkey);
                ctx->rsa.pubkey = NULL;
        }

        if (ctx->rsa.crt_prikey) {
                memzero_explicit(ctx->rsa.crt_prikey,
                                 half_key_sz * HPRE_CRT_PRMS);
                dma_free_coherent(dev, half_key_sz * HPRE_CRT_PRMS,
                                  ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey);
                ctx->rsa.crt_prikey = NULL;
        }

        if (ctx->rsa.prikey) {
                memzero_explicit(ctx->rsa.prikey, ctx->key_sz);
                dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey,
                                  ctx->rsa.dma_prikey);
                ctx->rsa.prikey = NULL;
        }

        hpre_ctx_clear(ctx, is_clear_all);
}

/*
 * Decide whether the parsed key carries CRT parameters:
 * return true for CRT, false for N-CRT.
 */
static bool hpre_is_crt_key(struct rsa_key *key)
{
        u16 len = key->p_sz + key->q_sz + key->dp_sz + key->dq_sz +
                  key->qinv_sz;

#define LEN_OF_NCRT_PARA        5

        /* an N-CRT key encodes its five CRT components in at most 5 bytes */
        return len > LEN_OF_NCRT_PARA;
}

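/*
 * Common setkey path: parse the raw key, load n (choosing hardware or
 * software operation by size), then d, the CRT parameters when present,
 * and finally e.
 */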
static int hpre_rsa_setkey(struct hpre_ctx *ctx, const void *key,
                           unsigned int keylen, bool private)
{
        struct rsa_key rsa_key;
        int ret;

        hpre_rsa_clear_ctx(ctx, false);

        if (private)
                ret = rsa_parse_priv_key(&rsa_key, key, keylen);
        else
                ret = rsa_parse_pub_key(&rsa_key, key, keylen);
        if (ret < 0)
                return ret;

        ret = hpre_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz, private);
        if (ret <= 0)
                return ret;

        if (private) {
                ret = hpre_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
                if (ret < 0)
                        goto free;

                if (hpre_is_crt_key(&rsa_key)) {
                        ret = hpre_rsa_setkey_crt(ctx, &rsa_key);
                        if (ret < 0)
                                goto free;
                }
        }

        ret = hpre_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
        if (ret < 0)
                goto free;

        if ((private && !ctx->rsa.prikey) || !ctx->rsa.pubkey) {
                ret = -EINVAL;
                goto free;
        }

        return 0;

free:
        hpre_rsa_clear_ctx(ctx, false);
        return ret;
}

static int hpre_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
                              unsigned int keylen)
{
        struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
        int ret;

        ret = crypto_akcipher_set_pub_key(ctx->rsa.soft_tfm, key, keylen);
        if (ret)
                return ret;

        return hpre_rsa_setkey(ctx, key, keylen, false);
}

static int hpre_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
                               unsigned int keylen)
{
        struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
        int ret;

        ret = crypto_akcipher_set_priv_key(ctx->rsa.soft_tfm, key, keylen);
        if (ret)
                return ret;

        return hpre_rsa_setkey(ctx, key, keylen, true);
}

static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm)
{
        struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);

        /* For 512-bit and 1536-bit keys, use the soft tfm instead */
        if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
            ctx->key_sz == HPRE_RSA_1536BITS_KSZ)
                return crypto_akcipher_maxsize(ctx->rsa.soft_tfm);

        return ctx->key_sz;
}

static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
{
        struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
        int ret;

        ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0);
        if (IS_ERR(ctx->rsa.soft_tfm)) {
                pr_err("Can not alloc_akcipher!\n");
                return PTR_ERR(ctx->rsa.soft_tfm);
        }

        ret = hpre_ctx_init(ctx);
        if (ret)
                crypto_free_akcipher(ctx->rsa.soft_tfm);

        return ret;
}

static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
        struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);

        hpre_rsa_clear_ctx(ctx, true);
        crypto_free_akcipher(ctx->rsa.soft_tfm);
}

static struct akcipher_alg rsa = {
        .sign = hpre_rsa_dec,
        .verify = hpre_rsa_enc,
        .encrypt = hpre_rsa_enc,
        .decrypt = hpre_rsa_dec,
        .set_pub_key = hpre_rsa_setpubkey,
        .set_priv_key = hpre_rsa_setprivkey,
        .max_size = hpre_rsa_max_size,
        .init = hpre_rsa_init_tfm,
        .exit = hpre_rsa_exit_tfm,
        .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
        .base = {
                .cra_ctxsize = sizeof(struct hpre_ctx),
                .cra_priority = HPRE_CRYPTO_ALG_PRI,
                .cra_name = "rsa",
                .cra_driver_name = "hpre-rsa",
                .cra_module = THIS_MODULE,
        },
};

#ifdef CONFIG_CRYPTO_DH
static struct kpp_alg dh = {
        .set_secret = hpre_dh_set_secret,
        .generate_public_key = hpre_dh_compute_value,
        .compute_shared_secret = hpre_dh_compute_value,
        .max_size = hpre_dh_max_size,
        .init = hpre_dh_init_tfm,
        .exit = hpre_dh_exit_tfm,
        .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
        .base = {
                .cra_ctxsize = sizeof(struct hpre_ctx),
                .cra_priority = HPRE_CRYPTO_ALG_PRI,
                .cra_name = "dh",
                .cra_driver_name = "hpre-dh",
                .cra_module = THIS_MODULE,
        },
};
#endif

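/*
 * The algorithms are registered once, when the first HPRE device appears,
 * and unregistered when the last one goes away; hpre_active_devs is the
 * device refcount protected by hpre_alg_lock.
 */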
int hpre_algs_register(void)
{
        int ret = 0;

        mutex_lock(&hpre_alg_lock);
        if (++hpre_active_devs == 1) {
                rsa.base.cra_flags = 0;
                ret = crypto_register_akcipher(&rsa);
                if (ret)
                        goto err_dec;
#ifdef CONFIG_CRYPTO_DH
                ret = crypto_register_kpp(&dh);
                if (ret) {
                        crypto_unregister_akcipher(&rsa);
                        goto err_dec;
                }
#endif
        }
        mutex_unlock(&hpre_alg_lock);
        return 0;

err_dec:
        /* keep the device count consistent when registration fails */
        hpre_active_devs--;
        mutex_unlock(&hpre_alg_lock);
        return ret;
}

void hpre_algs_unregister(void)
{
        mutex_lock(&hpre_alg_lock);
        if (--hpre_active_devs == 0) {
                crypto_unregister_akcipher(&rsa);
#ifdef CONFIG_CRYPTO_DH
                crypto_unregister_kpp(&dh);
#endif
        }
        mutex_unlock(&hpre_alg_lock);
}