linux/drivers/crypto/qat/qat_common/qat_asym_algs.c
/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

        * Redistributions of source code must retain the above copyright
          notice, this list of conditions and the following disclaimer.
        * Redistributions in binary form must reproduce the above copyright
          notice, this list of conditions and the following disclaimer in
          the documentation and/or other materials provided with the
          distribution.
        * Neither the name of Intel Corporation nor the names of its
          contributors may be used to endorse or promote products derived
          from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include <linux/module.h>
#include <crypto/internal/rsa.h>
#include <crypto/internal/akcipher.h>
#include <crypto/akcipher.h>
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <crypto/scatterwalk.h>
#include "qat_rsapubkey-asn1.h"
#include "qat_rsaprivkey-asn1.h"
#include "icp_qat_fw_pke.h"
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"

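/*
 * The RSA akcipher algorithm is registered once, when the first QAT
 * device comes up, and unregistered when the last one goes away;
 * algs_lock serializes updates to the active_devs refcount (see
 * qat_asym_algs_register/unregister at the bottom of this file).
 */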
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

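/*
 * Flat parameter tables handed to the PKE firmware: eight 64-bit slots
 * of DMA addresses, with named per-operation views overlaid on top.
 * Encrypt uses three inputs (m, e, n) and one output (c); decrypt uses
 * (c, d, n) and (m). The first unused slot in each table is zeroed
 * before a request is posted (in_tab[3] / out_tab[1] below).
 */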
struct qat_rsa_input_params {
        union {
                struct {
                        dma_addr_t m;
                        dma_addr_t e;
                        dma_addr_t n;
                } enc;
                struct {
                        dma_addr_t c;
                        dma_addr_t d;
                        dma_addr_t n;
                } dec;
                u64 in_tab[8];
        };
} __packed __aligned(64);

struct qat_rsa_output_params {
        union {
                struct {
                        dma_addr_t c;
                } enc;
                struct {
                        dma_addr_t m;
                } dec;
                u64 out_tab[8];
        };
} __packed __aligned(64);

struct qat_rsa_ctx {
        char *n;
        char *e;
        char *d;
        dma_addr_t dma_n;
        dma_addr_t dma_e;
        dma_addr_t dma_d;
        unsigned int key_sz;
        struct qat_crypto_instance *inst;
} __packed __aligned(64);

struct qat_rsa_request {
        struct qat_rsa_input_params in;
        struct qat_rsa_output_params out;
        dma_addr_t phy_in;
        dma_addr_t phy_out;
        char *src_align;
        char *dst_align;
        struct icp_qat_fw_pke_request req;
        struct qat_rsa_ctx *ctx;
        int err;
} __aligned(64);

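/*
 * Completion callback, run from the QAT response handler. The request
 * state lives in the akcipher request's ctx area; .reqsize below
 * reserves sizeof(struct qat_rsa_request) + 64 so that PTR_ALIGN can
 * satisfy the structure's 64-byte alignment. The firmware writes a
 * full key_sz-sized result, so leading zero bytes are stripped here
 * before the result is handed back to the caller.
 */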
static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
{
        struct akcipher_request *areq = (void *)(__force long)resp->opaque;
        struct qat_rsa_request *req = PTR_ALIGN(akcipher_request_ctx(areq), 64);
        struct device *dev = &GET_DEV(req->ctx->inst->accel_dev);
        int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
                                resp->pke_resp_hdr.comn_resp_flags);

        err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;

        if (req->src_align)
                dma_free_coherent(dev, req->ctx->key_sz, req->src_align,
                                  req->in.enc.m);
        else
                dma_unmap_single(dev, req->in.enc.m, req->ctx->key_sz,
                                 DMA_TO_DEVICE);

        areq->dst_len = req->ctx->key_sz;
        if (req->dst_align) {
                char *ptr = req->dst_align;

                while (!(*ptr) && areq->dst_len) {
                        areq->dst_len--;
                        ptr++;
                }

                if (areq->dst_len != req->ctx->key_sz)
                        memmove(req->dst_align, ptr, areq->dst_len);

                scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
                                         areq->dst_len, 1);

                dma_free_coherent(dev, req->ctx->key_sz, req->dst_align,
                                  req->out.enc.c);
        } else {
                char *ptr = sg_virt(areq->dst);

                while (!(*ptr) && areq->dst_len) {
                        areq->dst_len--;
                        ptr++;
                }

                if (sg_virt(areq->dst) != ptr && areq->dst_len)
                        memmove(sg_virt(areq->dst), ptr, areq->dst_len);

                dma_unmap_single(dev, req->out.enc.c, req->ctx->key_sz,
                                 DMA_FROM_DEVICE);
        }

        dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
                         DMA_TO_DEVICE);
        dma_unmap_single(dev, req->phy_out,
                         sizeof(struct qat_rsa_output_params),
                         DMA_TO_DEVICE);

        akcipher_request_complete(areq, err);
}

void qat_alg_asym_callback(void *_resp)
{
        struct icp_qat_fw_pke_resp *resp = _resp;

        qat_rsa_cb(resp);
}

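/*
 * Firmware function IDs selecting the PKE routine for a given modulus
 * size. A return of 0 from the lookup helpers below means the key size
 * is not supported by the hardware.
 */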
#define PKE_RSA_EP_512 0x1c161b21
#define PKE_RSA_EP_1024 0x35111bf7
#define PKE_RSA_EP_1536 0x4d111cdc
#define PKE_RSA_EP_2048 0x6e111dba
#define PKE_RSA_EP_3072 0x7d111ea3
#define PKE_RSA_EP_4096 0xa5101f7e

static unsigned long qat_rsa_enc_fn_id(unsigned int len)
{
        unsigned int bitslen = len << 3;

        switch (bitslen) {
        case 512:
                return PKE_RSA_EP_512;
        case 1024:
                return PKE_RSA_EP_1024;
        case 1536:
                return PKE_RSA_EP_1536;
        case 2048:
                return PKE_RSA_EP_2048;
        case 3072:
                return PKE_RSA_EP_3072;
        case 4096:
                return PKE_RSA_EP_4096;
        default:
                return 0;
        }
}

#define PKE_RSA_DP1_512 0x1c161b3c
#define PKE_RSA_DP1_1024 0x35111c12
#define PKE_RSA_DP1_1536 0x4d111cf7
#define PKE_RSA_DP1_2048 0x6e111dda
#define PKE_RSA_DP1_3072 0x7d111ebe
#define PKE_RSA_DP1_4096 0xa5101f98

static unsigned long qat_rsa_dec_fn_id(unsigned int len)
{
        unsigned int bitslen = len << 3;

        switch (bitslen) {
        case 512:
                return PKE_RSA_DP1_512;
        case 1024:
                return PKE_RSA_DP1_1024;
        case 1536:
                return PKE_RSA_DP1_1536;
        case 2048:
                return PKE_RSA_DP1_2048;
        case 3072:
                return PKE_RSA_DP1_3072;
        case 4096:
                return PKE_RSA_DP1_4096;
        default:
                return 0;
        }
}

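/*
 * RSA public-key operation: c = m^e mod n. Builds a flat PKE request,
 * maps (or bounce-buffers) the source and destination, and posts the
 * message to the instance's pke_tx ring, retrying briefly on -EBUSY.
 */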
static int qat_rsa_enc(struct akcipher_request *req)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct qat_crypto_instance *inst = ctx->inst;
        struct device *dev = &GET_DEV(inst->accel_dev);
        struct qat_rsa_request *qat_req =
                        PTR_ALIGN(akcipher_request_ctx(req), 64);
        struct icp_qat_fw_pke_request *msg = &qat_req->req;
        int ret, ctr = 0;

        if (unlikely(!ctx->n || !ctx->e))
                return -EINVAL;

        if (req->dst_len < ctx->key_sz) {
                req->dst_len = ctx->key_sz;
                return -EOVERFLOW;
        }
        memset(msg, '\0', sizeof(*msg));
        ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
                                          ICP_QAT_FW_COMN_REQ_FLAG_SET);
        msg->pke_hdr.cd_pars.func_id = qat_rsa_enc_fn_id(ctx->key_sz);
        if (unlikely(!msg->pke_hdr.cd_pars.func_id))
                return -EINVAL;

        qat_req->ctx = ctx;
        msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
        msg->pke_hdr.comn_req_flags =
                ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
                                            QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

        qat_req->in.enc.e = ctx->dma_e;
        qat_req->in.enc.n = ctx->dma_n;
        ret = -ENOMEM;

        /*
         * src can be of any size in the valid range, but the HW expects
         * it to be the same size as the modulus n, so if it is shorter
         * we allocate a new buffer and copy the src data into it;
         * otherwise we just map the user-provided buffer. The data must
         * also sit in a single contiguous buffer.
         */
        if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
                qat_req->src_align = NULL;
                qat_req->in.enc.m = dma_map_single(dev, sg_virt(req->src),
                                                   req->src_len, DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(dev, qat_req->in.enc.m)))
                        return ret;

        } else {
                int shift = ctx->key_sz - req->src_len;

                qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
                                                         &qat_req->in.enc.m,
                                                         GFP_KERNEL);
                if (unlikely(!qat_req->src_align))
                        return ret;

                scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
                                         0, req->src_len, 0);
        }
        if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
                qat_req->dst_align = NULL;
                qat_req->out.enc.c = dma_map_single(dev, sg_virt(req->dst),
                                                    req->dst_len,
                                                    DMA_FROM_DEVICE);

                if (unlikely(dma_mapping_error(dev, qat_req->out.enc.c)))
                        goto unmap_src;

        } else {
                qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
                                                         &qat_req->out.enc.c,
                                                         GFP_KERNEL);
                if (unlikely(!qat_req->dst_align))
                        goto unmap_src;

        }
        qat_req->in.in_tab[3] = 0;
        qat_req->out.out_tab[1] = 0;
        qat_req->phy_in = dma_map_single(dev, &qat_req->in.enc.m,
                                         sizeof(struct qat_rsa_input_params),
                                         DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
                goto unmap_dst;

        qat_req->phy_out = dma_map_single(dev, &qat_req->out.enc.c,
                                          sizeof(struct qat_rsa_output_params),
                                          DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
                goto unmap_in_params;

        msg->pke_mid.src_data_addr = qat_req->phy_in;
        msg->pke_mid.dest_data_addr = qat_req->phy_out;
        msg->pke_mid.opaque = (uint64_t)(__force long)req;
        msg->input_param_count = 3;
        msg->output_param_count = 1;
        do {
                ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
        } while (ret == -EBUSY && ctr++ < 100);

        if (!ret)
                return -EINPROGRESS;

        if (!dma_mapping_error(dev, qat_req->phy_out))
                dma_unmap_single(dev, qat_req->phy_out,
                                 sizeof(struct qat_rsa_output_params),
                                 DMA_TO_DEVICE);
unmap_in_params:
        if (!dma_mapping_error(dev, qat_req->phy_in))
                dma_unmap_single(dev, qat_req->phy_in,
                                 sizeof(struct qat_rsa_input_params),
                                 DMA_TO_DEVICE);
unmap_dst:
        if (qat_req->dst_align)
                dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
                                  qat_req->out.enc.c);
        else
                if (!dma_mapping_error(dev, qat_req->out.enc.c))
                        dma_unmap_single(dev, qat_req->out.enc.c, ctx->key_sz,
                                         DMA_FROM_DEVICE);
unmap_src:
        if (qat_req->src_align)
                dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
                                  qat_req->in.enc.m);
        else
                if (!dma_mapping_error(dev, qat_req->in.enc.m))
                        dma_unmap_single(dev, qat_req->in.enc.m, ctx->key_sz,
                                         DMA_TO_DEVICE);
        return ret;
}

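/*
 * RSA private-key operation: m = c^d mod n. Mirrors qat_rsa_enc, but
 * uses the private exponent d with the DP1 firmware routines, i.e. the
 * non-CRT (n, d) form of the decrypt primitive.
 */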
static int qat_rsa_dec(struct akcipher_request *req)
{
        struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
        struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct qat_crypto_instance *inst = ctx->inst;
        struct device *dev = &GET_DEV(inst->accel_dev);
        struct qat_rsa_request *qat_req =
                        PTR_ALIGN(akcipher_request_ctx(req), 64);
        struct icp_qat_fw_pke_request *msg = &qat_req->req;
        int ret, ctr = 0;

        if (unlikely(!ctx->n || !ctx->d))
                return -EINVAL;

        if (req->dst_len < ctx->key_sz) {
                req->dst_len = ctx->key_sz;
                return -EOVERFLOW;
        }
        memset(msg, '\0', sizeof(*msg));
        ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
                                          ICP_QAT_FW_COMN_REQ_FLAG_SET);
        msg->pke_hdr.cd_pars.func_id = qat_rsa_dec_fn_id(ctx->key_sz);
        if (unlikely(!msg->pke_hdr.cd_pars.func_id))
                return -EINVAL;

        qat_req->ctx = ctx;
        msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
        msg->pke_hdr.comn_req_flags =
                ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
                                            QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

        qat_req->in.dec.d = ctx->dma_d;
        qat_req->in.dec.n = ctx->dma_n;
        ret = -ENOMEM;

        /*
         * src can be of any size in the valid range, but the HW expects
         * it to be the same size as the modulus n, so if it is shorter
         * we allocate a new buffer and copy the src data into it;
         * otherwise we just map the user-provided buffer. The data must
         * also sit in a single contiguous buffer.
         */
        if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
                qat_req->src_align = NULL;
                qat_req->in.dec.c = dma_map_single(dev, sg_virt(req->src),
                                                   req->src_len, DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(dev, qat_req->in.dec.c)))
                        return ret;

        } else {
                int shift = ctx->key_sz - req->src_len;

                qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
                                                         &qat_req->in.dec.c,
                                                         GFP_KERNEL);
                if (unlikely(!qat_req->src_align))
                        return ret;

                scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
                                         0, req->src_len, 0);
        }
        if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
                qat_req->dst_align = NULL;
                qat_req->out.dec.m = dma_map_single(dev, sg_virt(req->dst),
                                                    req->dst_len,
                                                    DMA_FROM_DEVICE);

                if (unlikely(dma_mapping_error(dev, qat_req->out.dec.m)))
                        goto unmap_src;

        } else {
                qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
                                                         &qat_req->out.dec.m,
                                                         GFP_KERNEL);
                if (unlikely(!qat_req->dst_align))
                        goto unmap_src;

        }

        qat_req->in.in_tab[3] = 0;
        qat_req->out.out_tab[1] = 0;
        qat_req->phy_in = dma_map_single(dev, &qat_req->in.dec.c,
                                         sizeof(struct qat_rsa_input_params),
                                         DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
                goto unmap_dst;

        qat_req->phy_out = dma_map_single(dev, &qat_req->out.dec.m,
                                          sizeof(struct qat_rsa_output_params),
                                          DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
                goto unmap_in_params;

        msg->pke_mid.src_data_addr = qat_req->phy_in;
        msg->pke_mid.dest_data_addr = qat_req->phy_out;
        msg->pke_mid.opaque = (uint64_t)(__force long)req;
        msg->input_param_count = 3;
        msg->output_param_count = 1;
        do {
                ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
        } while (ret == -EBUSY && ctr++ < 100);

        if (!ret)
                return -EINPROGRESS;

        if (!dma_mapping_error(dev, qat_req->phy_out))
                dma_unmap_single(dev, qat_req->phy_out,
                                 sizeof(struct qat_rsa_output_params),
                                 DMA_TO_DEVICE);
unmap_in_params:
        if (!dma_mapping_error(dev, qat_req->phy_in))
                dma_unmap_single(dev, qat_req->phy_in,
                                 sizeof(struct qat_rsa_input_params),
                                 DMA_TO_DEVICE);
unmap_dst:
        if (qat_req->dst_align)
                dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
                                  qat_req->out.dec.m);
        else
                if (!dma_mapping_error(dev, qat_req->out.dec.m))
                        dma_unmap_single(dev, qat_req->out.dec.m, ctx->key_sz,
                                         DMA_FROM_DEVICE);
unmap_src:
        if (qat_req->src_align)
                dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
                                  qat_req->in.dec.c);
        else
                if (!dma_mapping_error(dev, qat_req->in.dec.c))
                        dma_unmap_single(dev, qat_req->in.dec.c, ctx->key_sz,
                                         DMA_TO_DEVICE);
        return ret;
}

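/*
 * Callbacks invoked by asn1_ber_decoder() through the generated
 * qat_rsapubkey/qat_rsaprivkey decoders. Each strips leading zero
 * octets from the BER INTEGER and copies the value, right-aligned,
 * into a DMA-coherent buffer sized to the modulus.
 */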
int qat_rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
                  const void *value, size_t vlen)
{
        struct qat_rsa_ctx *ctx = context;
        struct qat_crypto_instance *inst = ctx->inst;
        struct device *dev = &GET_DEV(inst->accel_dev);
        const char *ptr = value;
        int ret;

        while (!*ptr && vlen) {
                ptr++;
                vlen--;
        }

        ctx->key_sz = vlen;
        ret = -EINVAL;
        /* In FIPS mode only allow key size 2K & 3K */
        if (fips_enabled && (ctx->key_sz != 256 && ctx->key_sz != 384)) {
                pr_err("QAT: RSA: key size not allowed in FIPS mode\n");
                goto err;
        }
        /* invalid key size provided */
        if (!qat_rsa_enc_fn_id(ctx->key_sz))
                goto err;

        ret = -ENOMEM;
        ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
        if (!ctx->n)
                goto err;

        memcpy(ctx->n, ptr, ctx->key_sz);
        return 0;
err:
        ctx->key_sz = 0;
        ctx->n = NULL;
        return ret;
}

int qat_rsa_get_e(void *context, size_t hdrlen, unsigned char tag,
                  const void *value, size_t vlen)
{
        struct qat_rsa_ctx *ctx = context;
        struct qat_crypto_instance *inst = ctx->inst;
        struct device *dev = &GET_DEV(inst->accel_dev);
        const char *ptr = value;

        while (!*ptr && vlen) {
                ptr++;
                vlen--;
        }

        if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
                ctx->e = NULL;
                return -EINVAL;
        }

        ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
        if (!ctx->e)
                return -ENOMEM;

        memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
        return 0;
}

int qat_rsa_get_d(void *context, size_t hdrlen, unsigned char tag,
                  const void *value, size_t vlen)
{
        struct qat_rsa_ctx *ctx = context;
        struct qat_crypto_instance *inst = ctx->inst;
        struct device *dev = &GET_DEV(inst->accel_dev);
        const char *ptr = value;
        int ret;

        while (!*ptr && vlen) {
                ptr++;
                vlen--;
        }

        ret = -EINVAL;
        if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
                goto err;

        /* In FIPS mode only allow key size 2K & 3K */
        if (fips_enabled && (vlen != 256 && vlen != 384)) {
                pr_err("QAT: RSA: key size not allowed in FIPS mode\n");
                goto err;
        }

        ret = -ENOMEM;
        ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
        if (!ctx->d)
                goto err;

        memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
        return 0;
err:
        ctx->d = NULL;
        return ret;
}

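/*
 * (Re)program the RSA key. Any previous key material is released
 * first, with the private exponent zeroed before it is freed.
 */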
static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
                          unsigned int keylen, bool private)
{
        struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct device *dev = &GET_DEV(ctx->inst->accel_dev);
        int ret;

        /* Free the old key if any */
        if (ctx->n)
                dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
        if (ctx->e)
                dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
        if (ctx->d) {
                memset(ctx->d, '\0', ctx->key_sz);
                dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
        }

        ctx->n = NULL;
        ctx->e = NULL;
        ctx->d = NULL;

        if (private)
                ret = asn1_ber_decoder(&qat_rsaprivkey_decoder, ctx, key,
                                       keylen);
        else
                ret = asn1_ber_decoder(&qat_rsapubkey_decoder, ctx, key,
                                       keylen);
        if (ret < 0)
                goto free;

        if (!ctx->n || !ctx->e) {
                /* invalid key provided */
                ret = -EINVAL;
                goto free;
        }
        if (private && !ctx->d) {
                /* invalid private key provided */
                ret = -EINVAL;
                goto free;
        }

        return 0;
free:
        if (ctx->d) {
                memset(ctx->d, '\0', ctx->key_sz);
                dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
                ctx->d = NULL;
        }
        if (ctx->e) {
                dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
                ctx->e = NULL;
        }
        if (ctx->n) {
                dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
                ctx->n = NULL;
                ctx->key_sz = 0;
        }
        return ret;
}

static int qat_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
                             unsigned int keylen)
{
        return qat_rsa_setkey(tfm, key, keylen, false);
}

static int qat_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
                              unsigned int keylen)
{
        return qat_rsa_setkey(tfm, key, keylen, true);
}

static int qat_rsa_max_size(struct crypto_akcipher *tfm)
{
        struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

        return (ctx->n) ? ctx->key_sz : -EINVAL;
}

static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
{
        struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct qat_crypto_instance *inst =
                        qat_crypto_get_instance_node(get_current_node());

        if (!inst)
                return -EINVAL;

        ctx->key_sz = 0;
        ctx->inst = inst;
        return 0;
}

static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
        struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
        struct device *dev = &GET_DEV(ctx->inst->accel_dev);

        if (ctx->n)
                dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
        if (ctx->e)
                dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
        if (ctx->d) {
                memset(ctx->d, '\0', ctx->key_sz);
                dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
        }
        qat_crypto_put_instance(ctx->inst);
        ctx->n = NULL;
        ctx->e = NULL;
        ctx->d = NULL;
}

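/*
 * Raw RSA primitive: .sign and .verify map onto the same modular
 * exponentiations as decrypt and encrypt; no padding scheme is
 * applied by this driver.
 */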
static struct akcipher_alg rsa = {
        .encrypt = qat_rsa_enc,
        .decrypt = qat_rsa_dec,
        .sign = qat_rsa_dec,
        .verify = qat_rsa_enc,
        .set_pub_key = qat_rsa_setpubkey,
        .set_priv_key = qat_rsa_setprivkey,
        .max_size = qat_rsa_max_size,
        .init = qat_rsa_init_tfm,
        .exit = qat_rsa_exit_tfm,
        .reqsize = sizeof(struct qat_rsa_request) + 64,
        .base = {
                .cra_name = "rsa",
                .cra_driver_name = "qat-rsa",
                .cra_priority = 1000,
                .cra_module = THIS_MODULE,
                .cra_ctxsize = sizeof(struct qat_rsa_ctx),
        },
};

int qat_asym_algs_register(void)
{
        int ret = 0;

        mutex_lock(&algs_lock);
        if (++active_devs == 1) {
                rsa.base.cra_flags = 0;
                ret = crypto_register_akcipher(&rsa);
        }
        mutex_unlock(&algs_lock);
        return ret;
}

void qat_asym_algs_unregister(void)
{
        mutex_lock(&algs_lock);
        if (--active_devs == 0)
                crypto_unregister_akcipher(&rsa);
        mutex_unlock(&algs_lock);
}