linux/drivers/crypto/ccp/ccp-ops.c
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

/*
 * Note: We do not support CCP Crypto in RHEL7
 */
#define RHEL_SUPPORT_CCP_CRYPTO 0

/* SHA initial context values */
static const __be32 ccp_sha1_init[SHA1_DIGEST_SIZE / sizeof(__be32)] = {
        cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
        cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
        cpu_to_be32(SHA1_H4),
};

static const __be32 ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
        cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
        cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
        cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
        cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
};

static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
        cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
        cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
        cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
        cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};

#define CCP_NEW_JOBID(ccp)      ((ccp->vdata->version == CCP_VERSION(3, 0)) ? \
                                        ccp_gen_jobid(ccp) : 0)
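/*
 * Generate a job ID for tagging CCP operations.  IDs come from a
 * per-device atomic counter, truncated to CCP_JOBID_MASK.  Per the
 * CCP_NEW_JOBID() macro above, only version 3.0 devices use generated
 * IDs; later devices pass 0.
 */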
static u32 ccp_gen_jobid(struct ccp_device *ccp)
{
        return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK;
}

static void ccp_sg_free(struct ccp_sg_workarea *wa)
{
        if (wa->dma_count)
                dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir);

        wa->dma_count = 0;
}

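/*
 * Wrap a caller-supplied scatterlist in a workarea: count the entries
 * needed to cover len bytes and, unless dma_dir is DMA_NONE, map the
 * list for DMA.  ccp_sg_free() above undoes the mapping.
 */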
static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
                                struct scatterlist *sg, u64 len,
                                enum dma_data_direction dma_dir)
{
        memset(wa, 0, sizeof(*wa));

        wa->sg = sg;
        if (!sg)
                return 0;

        wa->nents = sg_nents_for_len(sg, len);
        if (wa->nents < 0)
                return wa->nents;

        wa->bytes_left = len;
        wa->sg_used = 0;

        if (len == 0)
                return 0;

        if (dma_dir == DMA_NONE)
                return 0;

        wa->dma_sg = sg;
        wa->dma_dev = dev;
        wa->dma_dir = dma_dir;
        wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir);
        if (!wa->dma_count)
                return -ENOMEM;

        return 0;
}

#if RHEL_SUPPORT_CCP_CRYPTO
static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
{
        unsigned int nbytes = min_t(u64, len, wa->bytes_left);

        if (!wa->sg)
                return;

        wa->sg_used += nbytes;
        wa->bytes_left -= nbytes;
        if (wa->sg_used == wa->sg->length) {
                wa->sg = sg_next(wa->sg);
                wa->sg_used = 0;
        }
}
#endif /* RHEL_SUPPORT_CCP_CRYPTO */

static void ccp_dm_free(struct ccp_dm_workarea *wa)
{
        if (wa->length <= CCP_DMAPOOL_MAX_SIZE) {
                if (wa->address)
                        dma_pool_free(wa->dma_pool, wa->address,
                                      wa->dma.address);
        } else {
                if (wa->dma.address)
                        dma_unmap_single(wa->dev, wa->dma.address, wa->length,
                                         wa->dma.dir);
                kfree(wa->address);
        }

        wa->address = NULL;
        wa->dma.address = 0;
}

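/*
 * Allocate a driver-owned DMA buffer ("dm" workarea).  Requests up to
 * CCP_DMAPOOL_MAX_SIZE are satisfied from the queue's DMA pool and are
 * always CCP_DMAPOOL_MAX_SIZE long; larger requests fall back to
 * kzalloc() plus dma_map_single().  ccp_dm_free() above selects the
 * matching release path based on wa->length.
 */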
static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
                                struct ccp_cmd_queue *cmd_q,
                                unsigned int len,
                                enum dma_data_direction dir)
{
        memset(wa, 0, sizeof(*wa));

        if (!len)
                return 0;

        wa->dev = cmd_q->ccp->dev;
        wa->length = len;

        if (len <= CCP_DMAPOOL_MAX_SIZE) {
                wa->dma_pool = cmd_q->dma_pool;

                wa->address = dma_pool_alloc(wa->dma_pool, GFP_KERNEL,
                                             &wa->dma.address);
                if (!wa->address)
                        return -ENOMEM;

                wa->dma.length = CCP_DMAPOOL_MAX_SIZE;

                memset(wa->address, 0, CCP_DMAPOOL_MAX_SIZE);
        } else {
                wa->address = kzalloc(len, GFP_KERNEL);
                if (!wa->address)
                        return -ENOMEM;

                wa->dma.address = dma_map_single(wa->dev, wa->address, len,
                                                 dir);
                if (!wa->dma.address)
                        return -ENOMEM;

                wa->dma.length = len;
        }
        wa->dma.dir = dir;

        return 0;
}

static void ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
                            struct scatterlist *sg, unsigned int sg_offset,
                            unsigned int len)
{
        WARN_ON(!wa->address);

        scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
                                 0);
}

#if RHEL_SUPPORT_CCP_CRYPTO
static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
                            struct scatterlist *sg, unsigned int sg_offset,
                            unsigned int len)
{
        WARN_ON(!wa->address);

        scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
                                 1);
}

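/*
 * Copy len bytes from the scatterlist into the workarea with the byte
 * order reversed, processed se_len bytes at a time, optionally
 * sign-extending a short final chunk.  The net effect is a full byte
 * reversal of the input: with se_len = 4, input bytes 01 02 03 04 05 06
 * land in the workarea as 06 05 04 03 02 01.  This presents big-endian
 * caller data (such as an RSA exponent) least-significant-byte first,
 * as the engine expects.
 */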
static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
                                   struct scatterlist *sg,
                                   unsigned int len, unsigned int se_len,
                                   bool sign_extend)
{
        unsigned int nbytes, sg_offset, dm_offset, sb_len, i;
        u8 buffer[CCP_REVERSE_BUF_SIZE];

        if (WARN_ON(se_len > sizeof(buffer)))
                return -EINVAL;

        sg_offset = len;
        dm_offset = 0;
        nbytes = len;
        while (nbytes) {
                sb_len = min_t(unsigned int, nbytes, se_len);
                sg_offset -= sb_len;

                scatterwalk_map_and_copy(buffer, sg, sg_offset, sb_len, 0);
                for (i = 0; i < sb_len; i++)
                        wa->address[dm_offset + i] = buffer[sb_len - i - 1];

                dm_offset += sb_len;
                nbytes -= sb_len;

                if ((sb_len != se_len) && sign_extend) {
                        /* Must sign-extend to nearest sign-extend length */
                        if (wa->address[dm_offset - 1] & 0x80)
                                memset(wa->address + dm_offset, 0xff,
                                       se_len - sb_len);
                }
        }

        return 0;
}

static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa,
                                    struct scatterlist *sg,
                                    unsigned int len)
{
        unsigned int nbytes, sg_offset, dm_offset, sb_len, i;
        u8 buffer[CCP_REVERSE_BUF_SIZE];

        sg_offset = 0;
        dm_offset = len;
        nbytes = len;
        while (nbytes) {
                sb_len = min_t(unsigned int, nbytes, sizeof(buffer));
                dm_offset -= sb_len;

                for (i = 0; i < sb_len; i++)
                        buffer[sb_len - i - 1] = wa->address[dm_offset + i];
                scatterwalk_map_and_copy(buffer, sg, sg_offset, sb_len, 1);

                sg_offset += sb_len;
                nbytes -= sb_len;
        }
}
#endif /* RHEL_SUPPORT_CCP_CRYPTO */

static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q)
{
        ccp_dm_free(&data->dm_wa);
        ccp_sg_free(&data->sg_wa);
}

static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q,
                         struct scatterlist *sg, u64 sg_len,
                         unsigned int dm_len,
                         enum dma_data_direction dir)
{
        int ret;

        memset(data, 0, sizeof(*data));

        ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len,
                                   dir);
        if (ret)
                goto e_err;

        ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir);
        if (ret)
                goto e_err;

        return 0;

e_err:
        ccp_free_data(data, cmd_q);

        return ret;
}

#if RHEL_SUPPORT_CCP_CRYPTO
static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from)
{
        struct ccp_sg_workarea *sg_wa = &data->sg_wa;
        struct ccp_dm_workarea *dm_wa = &data->dm_wa;
        unsigned int buf_count, nbytes;

        /* Clear the buffer if setting it */
        if (!from)
                memset(dm_wa->address, 0, dm_wa->length);

        if (!sg_wa->sg)
                return 0;

        /* Perform the copy operation
         *   nbytes will always be <= UINT_MAX because dm_wa->length is
         *   an unsigned int
         */
        nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length);
        scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used,
                                 nbytes, from);

        /* Update the structures and generate the count */
        buf_count = 0;
        while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
                nbytes = min(sg_wa->sg->length - sg_wa->sg_used,
                             dm_wa->length - buf_count);
                nbytes = min_t(u64, sg_wa->bytes_left, nbytes);

                buf_count += nbytes;
                ccp_update_sg_workarea(sg_wa, nbytes);
        }

        return buf_count;
}

static unsigned int ccp_fill_queue_buf(struct ccp_data *data)
{
        return ccp_queue_buf(data, 0);
}

static unsigned int ccp_empty_queue_buf(struct ccp_data *data)
{
        return ccp_queue_buf(data, 1);
}

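/*
 * Compute the source/destination DMA spans for the next CCP operation.
 * The engine DMAs from/to a single address per operation, so the span
 * is limited to what remains of the current source and destination
 * scatterlist entries.  Anything smaller than block_size is instead
 * staged through the dm workarea bounce buffer, with op->soc set so
 * the operation completes before the buffer is reused.
 */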
static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
                             struct ccp_op *op, unsigned int block_size,
                             bool blocksize_op)
{
        unsigned int sg_src_len, sg_dst_len, op_len;

        /* The CCP can only DMA from/to a single source and destination
         * address per operation, so find the smallest DMA area common to
         * the source and destination. The resulting len values will always
         * be <= UINT_MAX because the dma length is an unsigned int.
         */
        sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used;
        sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);

        if (dst) {
                sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used;
                sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
                op_len = min(sg_src_len, sg_dst_len);
        } else {
                op_len = sg_src_len;
        }

        /* The data operation length will be at least block_size, and
         * otherwise the smaller of the sg room remaining for the source
         * or the destination
         */
        op_len = max(op_len, block_size);

        /* Unless we have to buffer data, there's no reason to wait */
        op->soc = 0;

        if (sg_src_len < block_size) {
                /* Not enough data in the sg element, so it
                 * needs to be buffered into a blocksize chunk
                 */
                int cp_len = ccp_fill_queue_buf(src);

                op->soc = 1;
                op->src.u.dma.address = src->dm_wa.dma.address;
                op->src.u.dma.offset = 0;
                op->src.u.dma.length = (blocksize_op) ? block_size : cp_len;
        } else {
                /* Enough data in the sg element, but we need to
                 * adjust for any previously copied data
                 */
                op->src.u.dma.address = sg_dma_address(src->sg_wa.sg);
                op->src.u.dma.offset = src->sg_wa.sg_used;
                op->src.u.dma.length = op_len & ~(block_size - 1);

                ccp_update_sg_workarea(&src->sg_wa, op->src.u.dma.length);
        }

        if (dst) {
                if (sg_dst_len < block_size) {
                        /* Not enough room in the sg element or we're on the
                         * last piece of data (when using padding), so the
                         * output needs to be buffered into a blocksize chunk
                         */
                        op->soc = 1;
                        op->dst.u.dma.address = dst->dm_wa.dma.address;
                        op->dst.u.dma.offset = 0;
                        op->dst.u.dma.length = op->src.u.dma.length;
                } else {
                        /* Enough room in the sg element, but we need to
                         * adjust for any previously used area
                         */
                        op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg);
                        op->dst.u.dma.offset = dst->sg_wa.sg_used;
                        op->dst.u.dma.length = op->src.u.dma.length;
                }
        }
}

static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst,
                             struct ccp_op *op)
{
        op->init = 0;

        if (dst) {
                if (op->dst.u.dma.address == dst->dm_wa.dma.address)
                        ccp_empty_queue_buf(dst);
                else
                        ccp_update_sg_workarea(&dst->sg_wa,
                                               op->dst.u.dma.length);
        }
}
#endif /* RHEL_SUPPORT_CCP_CRYPTO */

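/*
 * Move the contents of a dm workarea into a key storage block (SB)
 * entry, or back out of one, using a passthru operation.  The
 * byte_swap argument selects the passthru byte-swap mode, which is how
 * big-endian caller data is converted to the little-endian layout the
 * engine expects.
 */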
static int ccp_copy_to_from_sb(struct ccp_cmd_queue *cmd_q,
                               struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
                               u32 byte_swap, bool from)
{
        struct ccp_op op;

        memset(&op, 0, sizeof(op));

        op.cmd_q = cmd_q;
        op.jobid = jobid;
        op.eom = 1;

        if (from) {
                op.soc = 1;
                op.src.type = CCP_MEMTYPE_SB;
                op.src.u.sb = sb;
                op.dst.type = CCP_MEMTYPE_SYSTEM;
                op.dst.u.dma.address = wa->dma.address;
                op.dst.u.dma.length = wa->length;
        } else {
                op.src.type = CCP_MEMTYPE_SYSTEM;
                op.src.u.dma.address = wa->dma.address;
                op.src.u.dma.length = wa->length;
                op.dst.type = CCP_MEMTYPE_SB;
                op.dst.u.sb = sb;
        }

        op.u.passthru.byte_swap = byte_swap;

        return cmd_q->ccp->vdata->perform->passthru(&op);
}

static int ccp_copy_to_sb(struct ccp_cmd_queue *cmd_q,
                          struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
                          u32 byte_swap)
{
        return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, false);
}

#if RHEL_SUPPORT_CCP_CRYPTO
static int ccp_copy_from_sb(struct ccp_cmd_queue *cmd_q,
                            struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
                            u32 byte_swap)
{
        return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, true);
}

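/*
 * AES-CMAC: load the key and the IV/context into SB entries, stream
 * the block-aligned message through the AES engine and, on the final
 * block, swap in the caller's K1/K2 subkey before the last operation.
 * The resulting MAC is read back out of the context SB entry.
 */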
static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
                                struct ccp_cmd *cmd)
{
        struct ccp_aes_engine *aes = &cmd->u.aes;
        struct ccp_dm_workarea key, ctx;
        struct ccp_data src;
        struct ccp_op op;
        unsigned int dm_offset;
        int ret;

        if (!((aes->key_len == AES_KEYSIZE_128) ||
              (aes->key_len == AES_KEYSIZE_192) ||
              (aes->key_len == AES_KEYSIZE_256)))
                return -EINVAL;

        if (aes->src_len & (AES_BLOCK_SIZE - 1))
                return -EINVAL;

        if (aes->iv_len != AES_BLOCK_SIZE)
                return -EINVAL;

        if (!aes->key || !aes->iv || !aes->src)
                return -EINVAL;

        if (aes->cmac_final) {
                if (aes->cmac_key_len != AES_BLOCK_SIZE)
                        return -EINVAL;

                if (!aes->cmac_key)
                        return -EINVAL;
        }

        BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
        BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);

        ret = -EIO;
        memset(&op, 0, sizeof(op));
        op.cmd_q = cmd_q;
        op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
        op.sb_key = cmd_q->sb_key;
        op.sb_ctx = cmd_q->sb_ctx;
        op.init = 1;
        op.u.aes.type = aes->type;
        op.u.aes.mode = aes->mode;
        op.u.aes.action = aes->action;

        /* All supported key sizes fit in a single (32-byte) SB entry
         * and must be in little endian format. Use the 256-bit byte
         * swap passthru option to convert from big endian to little
         * endian.
         */
        ret = ccp_init_dm_workarea(&key, cmd_q,
                                   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
                                   DMA_TO_DEVICE);
        if (ret)
                return ret;

        dm_offset = CCP_SB_BYTES - aes->key_len;
        ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
        ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
                             CCP_PASSTHRU_BYTESWAP_256BIT);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_key;
        }

        /* The AES context fits in a single (32-byte) SB entry and
         * must be in little endian format. Use the 256-bit byte swap
         * passthru option to convert from big endian to little endian.
         */
        ret = ccp_init_dm_workarea(&ctx, cmd_q,
                                   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
                                   DMA_BIDIRECTIONAL);
        if (ret)
                goto e_key;

        dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
        ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
        ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
                             CCP_PASSTHRU_BYTESWAP_256BIT);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_ctx;
        }

        /* Send data to the CCP AES engine */
        ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
                            AES_BLOCK_SIZE, DMA_TO_DEVICE);
        if (ret)
                goto e_ctx;

        while (src.sg_wa.bytes_left) {
                ccp_prepare_data(&src, NULL, &op, AES_BLOCK_SIZE, true);
                if (aes->cmac_final && !src.sg_wa.bytes_left) {
                        op.eom = 1;

                        /* Push the K1/K2 key to the CCP now */
                        ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid,
                                               op.sb_ctx,
                                               CCP_PASSTHRU_BYTESWAP_256BIT);
                        if (ret) {
                                cmd->engine_error = cmd_q->cmd_error;
                                goto e_src;
                        }

                        ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
                                        aes->cmac_key_len);
                        ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
                                             CCP_PASSTHRU_BYTESWAP_256BIT);
                        if (ret) {
                                cmd->engine_error = cmd_q->cmd_error;
                                goto e_src;
                        }
                }

                ret = cmd_q->ccp->vdata->perform->aes(&op);
                if (ret) {
                        cmd->engine_error = cmd_q->cmd_error;
                        goto e_src;
                }

                ccp_process_data(&src, NULL, &op);
        }

        /* Retrieve the AES context - convert from LE to BE using
         * 32-byte (256-bit) byteswapping
         */
        ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
                               CCP_PASSTHRU_BYTESWAP_256BIT);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_src;
        }

        /* ...but we only need AES_BLOCK_SIZE bytes */
        dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
        ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);

e_src:
        ccp_free_data(&src, cmd_q);

e_ctx:
        ccp_dm_free(&ctx);

e_key:
        ccp_dm_free(&key);

        return ret;
}

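/*
 * General AES: CMAC requests are routed to ccp_run_aes_cmac_cmd()
 * above.  Otherwise the key and, for non-ECB modes, the IV are loaded
 * into SB entries, the data is streamed through the engine, and for
 * non-ECB modes the final context is read back so the caller can chain
 * subsequent requests.
 */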
static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
        struct ccp_aes_engine *aes = &cmd->u.aes;
        struct ccp_dm_workarea key, ctx;
        struct ccp_data src, dst;
        struct ccp_op op;
        unsigned int dm_offset;
        bool in_place = false;
        int ret;

        if (aes->mode == CCP_AES_MODE_CMAC)
                return ccp_run_aes_cmac_cmd(cmd_q, cmd);

        if (!((aes->key_len == AES_KEYSIZE_128) ||
              (aes->key_len == AES_KEYSIZE_192) ||
              (aes->key_len == AES_KEYSIZE_256)))
                return -EINVAL;

        if (((aes->mode == CCP_AES_MODE_ECB) ||
             (aes->mode == CCP_AES_MODE_CBC) ||
             (aes->mode == CCP_AES_MODE_CFB)) &&
            (aes->src_len & (AES_BLOCK_SIZE - 1)))
                return -EINVAL;

        if (!aes->key || !aes->src || !aes->dst)
                return -EINVAL;

        if (aes->mode != CCP_AES_MODE_ECB) {
                if (aes->iv_len != AES_BLOCK_SIZE)
                        return -EINVAL;

                if (!aes->iv)
                        return -EINVAL;
        }

        BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
        BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);

        ret = -EIO;
        memset(&op, 0, sizeof(op));
        op.cmd_q = cmd_q;
        op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
        op.sb_key = cmd_q->sb_key;
        op.sb_ctx = cmd_q->sb_ctx;
        op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1;
        op.u.aes.type = aes->type;
        op.u.aes.mode = aes->mode;
        op.u.aes.action = aes->action;

        /* All supported key sizes fit in a single (32-byte) SB entry
         * and must be in little endian format. Use the 256-bit byte
         * swap passthru option to convert from big endian to little
         * endian.
         */
        ret = ccp_init_dm_workarea(&key, cmd_q,
                                   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
                                   DMA_TO_DEVICE);
        if (ret)
                return ret;

        dm_offset = CCP_SB_BYTES - aes->key_len;
        ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
        ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
                             CCP_PASSTHRU_BYTESWAP_256BIT);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_key;
        }

        /* The AES context fits in a single (32-byte) SB entry and
         * must be in little endian format. Use the 256-bit byte swap
         * passthru option to convert from big endian to little endian.
         */
        ret = ccp_init_dm_workarea(&ctx, cmd_q,
                                   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
                                   DMA_BIDIRECTIONAL);
        if (ret)
                goto e_key;

        if (aes->mode != CCP_AES_MODE_ECB) {
                /* Load the AES context - convert to LE */
                dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
                ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
                ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
                                     CCP_PASSTHRU_BYTESWAP_256BIT);
                if (ret) {
                        cmd->engine_error = cmd_q->cmd_error;
                        goto e_ctx;
                }
        }

        /* Prepare the input and output data workareas. For in-place
         * operations we need to set the dma direction to BIDIRECTIONAL
         * and copy the src workarea to the dst workarea.
         */
        if (sg_virt(aes->src) == sg_virt(aes->dst))
                in_place = true;

        ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
                            AES_BLOCK_SIZE,
                            in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
        if (ret)
                goto e_ctx;

        if (in_place) {
                dst = src;
        } else {
                ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len,
                                    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
                if (ret)
                        goto e_src;
        }

        /* Send data to the CCP AES engine */
        while (src.sg_wa.bytes_left) {
                ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
                if (!src.sg_wa.bytes_left) {
                        op.eom = 1;

                        /* Since we don't retrieve the AES context in ECB
                         * mode we have to wait for the operation to complete
                         * on the last piece of data
                         */
                        if (aes->mode == CCP_AES_MODE_ECB)
                                op.soc = 1;
                }

                ret = cmd_q->ccp->vdata->perform->aes(&op);
                if (ret) {
                        cmd->engine_error = cmd_q->cmd_error;
                        goto e_dst;
                }

                ccp_process_data(&src, &dst, &op);
        }

        if (aes->mode != CCP_AES_MODE_ECB) {
                /* Retrieve the AES context - convert from LE to BE using
                 * 32-byte (256-bit) byteswapping
                 */
                ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
                                       CCP_PASSTHRU_BYTESWAP_256BIT);
                if (ret) {
                        cmd->engine_error = cmd_q->cmd_error;
                        goto e_dst;
                }

                /* ...but we only need AES_BLOCK_SIZE bytes */
                dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
                ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
        }

e_dst:
        if (!in_place)
                ccp_free_data(&dst, cmd_q);

e_src:
        ccp_free_data(&src, cmd_q);

e_ctx:
        ccp_dm_free(&ctx);

e_key:
        ccp_dm_free(&key);

        return ret;
}

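/*
 * AES-XTS: only 128-bit keys are handled here; the two XTS keys arrive
 * concatenated in xts->key and are loaded into a single SB entry.  The
 * tweak (xts->iv) is loaded without byte swapping since the XTS
 * context is already little endian, and data is processed in the
 * caller-selected unit size (16 to 4096 bytes).
 */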
static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
                               struct ccp_cmd *cmd)
{
        struct ccp_xts_aes_engine *xts = &cmd->u.xts;
        struct ccp_dm_workarea key, ctx;
        struct ccp_data src, dst;
        struct ccp_op op;
        unsigned int unit_size, dm_offset;
        bool in_place = false;
        int ret;

        switch (xts->unit_size) {
        case CCP_XTS_AES_UNIT_SIZE_16:
                unit_size = 16;
                break;
        case CCP_XTS_AES_UNIT_SIZE_512:
                unit_size = 512;
                break;
        case CCP_XTS_AES_UNIT_SIZE_1024:
                unit_size = 1024;
                break;
        case CCP_XTS_AES_UNIT_SIZE_2048:
                unit_size = 2048;
                break;
        case CCP_XTS_AES_UNIT_SIZE_4096:
                unit_size = 4096;
                break;

        default:
                return -EINVAL;
        }

        if (xts->key_len != AES_KEYSIZE_128)
                return -EINVAL;

        if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1)))
                return -EINVAL;

        if (xts->iv_len != AES_BLOCK_SIZE)
                return -EINVAL;

        if (!xts->key || !xts->iv || !xts->src || !xts->dst)
                return -EINVAL;

        BUILD_BUG_ON(CCP_XTS_AES_KEY_SB_COUNT != 1);
        BUILD_BUG_ON(CCP_XTS_AES_CTX_SB_COUNT != 1);

        ret = -EIO;
        memset(&op, 0, sizeof(op));
        op.cmd_q = cmd_q;
        op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
        op.sb_key = cmd_q->sb_key;
        op.sb_ctx = cmd_q->sb_ctx;
        op.init = 1;
        op.u.xts.action = xts->action;
        op.u.xts.unit_size = xts->unit_size;

        /* All supported key sizes fit in a single (32-byte) SB entry
         * and must be in little endian format. Use the 256-bit byte
         * swap passthru option to convert from big endian to little
         * endian.
         */
        ret = ccp_init_dm_workarea(&key, cmd_q,
                                   CCP_XTS_AES_KEY_SB_COUNT * CCP_SB_BYTES,
                                   DMA_TO_DEVICE);
        if (ret)
                return ret;

        dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
        ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
        ccp_set_dm_area(&key, 0, xts->key, dm_offset, xts->key_len);
        ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
                             CCP_PASSTHRU_BYTESWAP_256BIT);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_key;
        }

        /* The AES context fits in a single (32-byte) SB entry and
         * for XTS is already in little endian format so no byte swapping
         * is needed.
         */
        ret = ccp_init_dm_workarea(&ctx, cmd_q,
                                   CCP_XTS_AES_CTX_SB_COUNT * CCP_SB_BYTES,
                                   DMA_BIDIRECTIONAL);
        if (ret)
                goto e_key;

        ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
        ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
                             CCP_PASSTHRU_BYTESWAP_NOOP);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_ctx;
        }

        /* Prepare the input and output data workareas. For in-place
         * operations we need to set the dma direction to BIDIRECTIONAL
         * and copy the src workarea to the dst workarea.
         */
        if (sg_virt(xts->src) == sg_virt(xts->dst))
                in_place = true;

        ret = ccp_init_data(&src, cmd_q, xts->src, xts->src_len,
                            unit_size,
                            in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
        if (ret)
                goto e_ctx;

        if (in_place) {
                dst = src;
        } else {
                ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len,
                                    unit_size, DMA_FROM_DEVICE);
                if (ret)
                        goto e_src;
        }

        /* Send data to the CCP AES engine */
        while (src.sg_wa.bytes_left) {
                ccp_prepare_data(&src, &dst, &op, unit_size, true);
                if (!src.sg_wa.bytes_left)
                        op.eom = 1;

                ret = cmd_q->ccp->vdata->perform->xts_aes(&op);
                if (ret) {
                        cmd->engine_error = cmd_q->cmd_error;
                        goto e_dst;
                }

                ccp_process_data(&src, &dst, &op);
        }

        /* Retrieve the AES context - convert from LE to BE using
         * 32-byte (256-bit) byteswapping
         */
        ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
                               CCP_PASSTHRU_BYTESWAP_256BIT);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_dst;
        }

        /* ...but we only need AES_BLOCK_SIZE bytes */
        dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
        ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len);

e_dst:
        if (!in_place)
                ccp_free_data(&dst, cmd_q);

e_src:
        ccp_free_data(&src, cmd_q);

e_ctx:
        ccp_dm_free(&ctx);

e_key:
        ccp_dm_free(&key);

        return ret;
}

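/*
 * SHA-1/224/256: seed (on the first pass) or restore the hash context
 * in an SB entry, stream block-aligned data through the SHA engine,
 * then stash or finalize the context.  Version 3.0 devices cannot hash
 * a zero-length message, so that case is answered with the well-known
 * empty-message digests.  When sha->opad is supplied, the outer HMAC
 * hash is computed via a recursive call.
 */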
static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
        struct ccp_sha_engine *sha = &cmd->u.sha;
        struct ccp_dm_workarea ctx;
        struct ccp_data src;
        struct ccp_op op;
        unsigned int ioffset, ooffset;
        unsigned int digest_size;
        int sb_count;
        const void *init;
        u64 block_size;
        int ctx_size;
        int ret;

        switch (sha->type) {
        case CCP_SHA_TYPE_1:
                if (sha->ctx_len < SHA1_DIGEST_SIZE)
                        return -EINVAL;
                block_size = SHA1_BLOCK_SIZE;
                break;
        case CCP_SHA_TYPE_224:
                if (sha->ctx_len < SHA224_DIGEST_SIZE)
                        return -EINVAL;
                block_size = SHA224_BLOCK_SIZE;
                break;
        case CCP_SHA_TYPE_256:
                if (sha->ctx_len < SHA256_DIGEST_SIZE)
                        return -EINVAL;
                block_size = SHA256_BLOCK_SIZE;
                break;
        default:
                return -EINVAL;
        }

        if (!sha->ctx)
                return -EINVAL;

        if (!sha->final && (sha->src_len & (block_size - 1)))
                return -EINVAL;

        /* The version 3 device can't handle zero-length input */
        if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {
                if (!sha->src_len) {
                        unsigned int digest_len;
                        const u8 *sha_zero;

                        /* Not final, just return */
                        if (!sha->final)
                                return 0;

                        /* The CCP cannot perform a zero-length sha
                         * operation, so the caller must buffer the data
                         * for the final operation.
                         */
                        if (sha->msg_bits)
                                return -EINVAL;

                        /* A sha operation for a message with a total
                         * length of zero is nonetheless valid, so supply
                         * the known result values directly.
                         */
                        switch (sha->type) {
                        case CCP_SHA_TYPE_1:
                                sha_zero = sha1_zero_message_hash;
                                digest_len = SHA1_DIGEST_SIZE;
                                break;
                        case CCP_SHA_TYPE_224:
                                sha_zero = sha224_zero_message_hash;
                                digest_len = SHA224_DIGEST_SIZE;
                                break;
                        case CCP_SHA_TYPE_256:
                                sha_zero = sha256_zero_message_hash;
                                digest_len = SHA256_DIGEST_SIZE;
                                break;
                        default:
                                return -EINVAL;
                        }

                        scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0,
                                                 digest_len, 1);

                        return 0;
                }
        }

        /* Set variables used throughout */
        switch (sha->type) {
        case CCP_SHA_TYPE_1:
                digest_size = SHA1_DIGEST_SIZE;
                init = (void *) ccp_sha1_init;
                ctx_size = SHA1_DIGEST_SIZE;
                sb_count = 1;
                if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
                        ooffset = ioffset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
                else
                        ooffset = ioffset = 0;
                break;
        case CCP_SHA_TYPE_224:
                digest_size = SHA224_DIGEST_SIZE;
                init = (void *) ccp_sha224_init;
                ctx_size = SHA256_DIGEST_SIZE;
                sb_count = 1;
                ioffset = 0;
                if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
                        ooffset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
                else
                        ooffset = 0;
                break;
        case CCP_SHA_TYPE_256:
                digest_size = SHA256_DIGEST_SIZE;
                init = (void *) ccp_sha256_init;
                ctx_size = SHA256_DIGEST_SIZE;
                sb_count = 1;
                ooffset = ioffset = 0;
                break;
        default:
                ret = -EINVAL;
                goto e_data;
        }

        /* For zero-length plaintext the src pointer is ignored;
         * otherwise both parts must be valid
         */
        if (sha->src_len && !sha->src)
                return -EINVAL;

        memset(&op, 0, sizeof(op));
        op.cmd_q = cmd_q;
        op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
        op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
        op.u.sha.type = sha->type;
        op.u.sha.msg_bits = sha->msg_bits;

        ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES,
                                   DMA_BIDIRECTIONAL);
        if (ret)
                return ret;
        if (sha->first) {
                switch (sha->type) {
                case CCP_SHA_TYPE_1:
                case CCP_SHA_TYPE_224:
                case CCP_SHA_TYPE_256:
                        memcpy(ctx.address + ioffset, init, ctx_size);
                        break;
                default:
                        ret = -EINVAL;
                        goto e_ctx;
                }
        } else {
                /* Restore the context */
                ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
                                sb_count * CCP_SB_BYTES);
        }

        ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
                             CCP_PASSTHRU_BYTESWAP_256BIT);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_ctx;
        }

        if (sha->src) {
                /* Send data to the CCP SHA engine; block_size is set above */
                ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len,
                                    block_size, DMA_TO_DEVICE);
                if (ret)
                        goto e_ctx;

                while (src.sg_wa.bytes_left) {
                        ccp_prepare_data(&src, NULL, &op, block_size, false);
                        if (sha->final && !src.sg_wa.bytes_left)
                                op.eom = 1;

                        ret = cmd_q->ccp->vdata->perform->sha(&op);
                        if (ret) {
                                cmd->engine_error = cmd_q->cmd_error;
                                goto e_data;
                        }

                        ccp_process_data(&src, NULL, &op);
                }
        } else {
                op.eom = 1;
                ret = cmd_q->ccp->vdata->perform->sha(&op);
                if (ret) {
                        cmd->engine_error = cmd_q->cmd_error;
                        goto e_data;
                }
        }

        /* Retrieve the SHA context - convert from LE to BE using
         * 32-byte (256-bit) byteswapping
         */
        ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
                               CCP_PASSTHRU_BYTESWAP_256BIT);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_data;
        }

        if (sha->final) {
                /* Finishing up, so get the digest */
                switch (sha->type) {
                case CCP_SHA_TYPE_1:
                case CCP_SHA_TYPE_224:
                case CCP_SHA_TYPE_256:
                        ccp_get_dm_area(&ctx, ooffset,
                                        sha->ctx, 0,
                                        digest_size);
                        break;
                default:
                        ret = -EINVAL;
                        goto e_ctx;
                }
        } else {
                /* Stash the context */
                ccp_get_dm_area(&ctx, 0, sha->ctx, 0,
                                sb_count * CCP_SB_BYTES);
        }

        if (sha->final && sha->opad) {
                /* HMAC operation, recursively perform final SHA */
                struct ccp_cmd hmac_cmd;
                struct scatterlist sg;
                u8 *hmac_buf;

                if (sha->opad_len != block_size) {
                        ret = -EINVAL;
                        goto e_data;
                }

                hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL);
                if (!hmac_buf) {
                        ret = -ENOMEM;
                        goto e_data;
                }
                sg_init_one(&sg, hmac_buf, block_size + digest_size);

                scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0);
                switch (sha->type) {
                case CCP_SHA_TYPE_1:
                case CCP_SHA_TYPE_224:
                case CCP_SHA_TYPE_256:
                        memcpy(hmac_buf + block_size,
                               ctx.address + ooffset,
                               digest_size);
                        break;
                default:
                        ret = -EINVAL;
                        goto e_ctx;
                }

                memset(&hmac_cmd, 0, sizeof(hmac_cmd));
                hmac_cmd.engine = CCP_ENGINE_SHA;
                hmac_cmd.u.sha.type = sha->type;
                hmac_cmd.u.sha.ctx = sha->ctx;
                hmac_cmd.u.sha.ctx_len = sha->ctx_len;
                hmac_cmd.u.sha.src = &sg;
                hmac_cmd.u.sha.src_len = block_size + digest_size;
                hmac_cmd.u.sha.opad = NULL;
                hmac_cmd.u.sha.opad_len = 0;
                hmac_cmd.u.sha.first = 1;
                hmac_cmd.u.sha.final = 1;
                hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3;

                ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd);
                if (ret)
                        cmd->engine_error = hmac_cmd.engine_error;

                kfree(hmac_buf);
        }

e_data:
        if (sha->src)
                ccp_free_data(&src, cmd_q);

e_ctx:
        ccp_dm_free(&ctx);

        return ret;
}

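/*
 * RSA: modular exponentiation of rsa->src by rsa->exp modulo rsa->mod.
 * The exponent is loaded into dynamically allocated SB entries, while
 * the modulus and the message are reverse-copied (big endian to little
 * endian) into a single input buffer, modulus first.  This is why
 * i_len is twice o_len, with o_len being the key size rounded up to a
 * multiple of 256 bits.
 */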
static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
        struct ccp_rsa_engine *rsa = &cmd->u.rsa;
        struct ccp_dm_workarea exp, src;
        struct ccp_data dst;
        struct ccp_op op;
        unsigned int sb_count, i_len, o_len;
        int ret;

        if (rsa->key_size > CCP_RSA_MAX_WIDTH)
                return -EINVAL;

        if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)
                return -EINVAL;

        /* The RSA modulus must precede the message being acted upon, so
         * it must be copied to a DMA area where the message and the
         * modulus can be concatenated.  Therefore the input buffer
         * length required is twice the output buffer length (which
         * must be a multiple of 256 bits).
         */
        o_len = ((rsa->key_size + 255) / 256) * 32;
        i_len = o_len * 2;

        sb_count = o_len / CCP_SB_BYTES;

        memset(&op, 0, sizeof(op));
        op.cmd_q = cmd_q;
        op.jobid = ccp_gen_jobid(cmd_q->ccp);
        op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q, sb_count);

        if (!op.sb_key)
                return -EIO;

        /* The RSA exponent may span multiple (32-byte) SB entries and must
         * be in little endian format. Reverse copy each 32-byte chunk
         * of the exponent (En chunk to E0 chunk, E(n-1) chunk to E1 chunk)
         * and each byte within that chunk and do not perform any byte swap
         * operations on the passthru operation.
         */
        ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE);
        if (ret)
                goto e_sb;

        ret = ccp_reverse_set_dm_area(&exp, rsa->exp, rsa->exp_len,
                                      CCP_SB_BYTES, false);
        if (ret)
                goto e_exp;
        ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
                             CCP_PASSTHRU_BYTESWAP_NOOP);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_exp;
        }

        /* Concatenate the modulus and the message. Both the modulus and
         * the operands must be in little endian format.  Since the input
         * is in big endian format it must be converted.
         */
        ret = ccp_init_dm_workarea(&src, cmd_q, i_len, DMA_TO_DEVICE);
        if (ret)
                goto e_exp;

        ret = ccp_reverse_set_dm_area(&src, rsa->mod, rsa->mod_len,
                                      CCP_SB_BYTES, false);
        if (ret)
                goto e_src;
        src.address += o_len;   /* Adjust the address for the copy operation */
        ret = ccp_reverse_set_dm_area(&src, rsa->src, rsa->src_len,
                                      CCP_SB_BYTES, false);
        if (ret)
                goto e_src;
        src.address -= o_len;   /* Reset the address to original value */

        /* Prepare the output area for the operation */
        ret = ccp_init_data(&dst, cmd_q, rsa->dst, rsa->mod_len,
                            o_len, DMA_FROM_DEVICE);
        if (ret)
                goto e_src;

        op.soc = 1;
        op.src.u.dma.address = src.dma.address;
        op.src.u.dma.offset = 0;
        op.src.u.dma.length = i_len;
        op.dst.u.dma.address = dst.dm_wa.dma.address;
        op.dst.u.dma.offset = 0;
        op.dst.u.dma.length = o_len;

        op.u.rsa.mod_size = rsa->key_size;
        op.u.rsa.input_len = i_len;

        ret = cmd_q->ccp->vdata->perform->rsa(&op);
        if (ret) {
                cmd->engine_error = cmd_q->cmd_error;
                goto e_dst;
        }

        ccp_reverse_get_dm_area(&dst.dm_wa, rsa->dst, rsa->mod_len);

e_dst:
        ccp_free_data(&dst, cmd_q);

e_src:
        ccp_dm_free(&src);

e_exp:
        ccp_dm_free(&exp);

e_sb:
        cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);

        return ret;
}
#endif /* RHEL_SUPPORT_CCP_CRYPTO */

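/*
 * Passthru: copy data from source to destination, optionally applying
 * a bitwise mask loaded into an SB entry.  The engine takes a single
 * source and destination address per operation, so each mapped source
 * sg entry must fit within the remaining space of the current
 * destination sg entry, and lengths must be multiples of
 * CCP_PASSTHRU_BLOCKSIZE.
 */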
static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
                                struct ccp_cmd *cmd)
{
        struct ccp_passthru_engine *pt = &cmd->u.passthru;
        struct ccp_dm_workarea mask;
        struct ccp_data src, dst;
        struct ccp_op op;
        bool in_place = false;
        unsigned int i;
        int ret = 0;

        if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
                return -EINVAL;

        if (!pt->src || !pt->dst)
                return -EINVAL;

        if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
                if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
                        return -EINVAL;
                if (!pt->mask)
                        return -EINVAL;
        }

        BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);

        memset(&op, 0, sizeof(op));
        op.cmd_q = cmd_q;
        op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

        if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
                /* Load the mask */
                op.sb_key = cmd_q->sb_key;

                ret = ccp_init_dm_workarea(&mask, cmd_q,
                                           CCP_PASSTHRU_SB_COUNT *
                                           CCP_SB_BYTES,
                                           DMA_TO_DEVICE);
                if (ret)
                        return ret;

                ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
                ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
                                     CCP_PASSTHRU_BYTESWAP_NOOP);
                if (ret) {
                        cmd->engine_error = cmd_q->cmd_error;
                        goto e_mask;
                }
        }

        /* Prepare the input and output data workareas. For in-place
         * operations we need to set the dma direction to BIDIRECTIONAL
         * and copy the src workarea to the dst workarea.
         */
        if (sg_virt(pt->src) == sg_virt(pt->dst))
                in_place = true;

        ret = ccp_init_data(&src, cmd_q, pt->src, pt->src_len,
                            CCP_PASSTHRU_MASKSIZE,
                            in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
        if (ret)
                goto e_mask;

        if (in_place) {
                dst = src;
        } else {
                ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len,
                                    CCP_PASSTHRU_MASKSIZE, DMA_FROM_DEVICE);
                if (ret)
                        goto e_src;
        }

        /* Send data to the CCP Passthru engine
         *   Because the CCP engine works on a single source and destination
         *   dma address at a time, each entry in the source scatterlist
         *   (after the dma_map_sg call) must be less than or equal to the
         *   (remaining) length in the destination scatterlist entry and the
         *   length must be a multiple of CCP_PASSTHRU_BLOCKSIZE
         */
        dst.sg_wa.sg_used = 0;
        for (i = 1; i <= src.sg_wa.dma_count; i++) {
                if (!dst.sg_wa.sg ||
                    (dst.sg_wa.sg->length < src.sg_wa.sg->length)) {
                        ret = -EINVAL;
                        goto e_dst;
                }

                if (i == src.sg_wa.dma_count) {
                        op.eom = 1;
                        op.soc = 1;
                }

                op.src.type = CCP_MEMTYPE_SYSTEM;
                op.src.u.dma.address = sg_dma_address(src.sg_wa.sg);
                op.src.u.dma.offset = 0;
                op.src.u.dma.length = sg_dma_len(src.sg_wa.sg);

                op.dst.type = CCP_MEMTYPE_SYSTEM;
                op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg);
                op.dst.u.dma.offset = dst.sg_wa.sg_used;
                op.dst.u.dma.length = op.src.u.dma.length;

                ret = cmd_q->ccp->vdata->perform->passthru(&op);
                if (ret) {
                        cmd->engine_error = cmd_q->cmd_error;
                        goto e_dst;
                }

                dst.sg_wa.sg_used += src.sg_wa.sg->length;
                if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) {
                        dst.sg_wa.sg = sg_next(dst.sg_wa.sg);
                        dst.sg_wa.sg_used = 0;
                }
                src.sg_wa.sg = sg_next(src.sg_wa.sg);
        }

e_dst:
        if (!in_place)
                ccp_free_data(&dst, cmd_q);

e_src:
        ccp_free_data(&src, cmd_q);

e_mask:
        if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
                ccp_dm_free(&mask);

        return ret;
}

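/*
 * Passthru "nomap" variant: the caller supplies already-mapped DMA
 * addresses (pt->src_dma, pt->dst_dma and, for masking, pt->mask), so
 * no scatterlist setup or mapping is done here and the transfer is
 * issued as a single operation.
 */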
1477static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
1478                                      struct ccp_cmd *cmd)
1479{
1480        struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap;
1481        struct ccp_dm_workarea mask;
1482        struct ccp_op op;
1483        int ret;
1484
1485        if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
1486                return -EINVAL;
1487
1488        if (!pt->src_dma || !pt->dst_dma)
1489                return -EINVAL;
1490
1491        if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
1492                if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
1493                        return -EINVAL;
1494                if (!pt->mask)
1495                        return -EINVAL;
1496        }
1497
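        /* The mask below is loaded into a single storage-block (SB) slot,
         * so this code is only correct while CCP_PASSTHRU_SB_COUNT is 1.
         */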
1498        BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);
1499
1500        memset(&op, 0, sizeof(op));
1501        op.cmd_q = cmd_q;
1502        op.jobid = ccp_gen_jobid(cmd_q->ccp);
1503
1504        if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
1505                /* Load the mask */
1506                op.sb_key = cmd_q->sb_key;
1507
1508                mask.length = pt->mask_len;
1509                mask.dma.address = pt->mask;
1510                mask.dma.length = pt->mask_len;
1511
1512                ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
1513                                     CCP_PASSTHRU_BYTESWAP_NOOP);
1514                if (ret) {
1515                        cmd->engine_error = cmd_q->cmd_error;
1516                        return ret;
1517                }
1518        }
1519
1520        /* Send data to the CCP Passthru engine */
1521        op.eom = 1;
1522        op.soc = 1;
1523
1524        op.src.type = CCP_MEMTYPE_SYSTEM;
1525        op.src.u.dma.address = pt->src_dma;
1526        op.src.u.dma.offset = 0;
1527        op.src.u.dma.length = pt->src_len;
1528
1529        op.dst.type = CCP_MEMTYPE_SYSTEM;
1530        op.dst.u.dma.address = pt->dst_dma;
1531        op.dst.u.dma.offset = 0;
1532        op.dst.u.dma.length = pt->src_len;
1533
1534        ret = cmd_q->ccp->vdata->perform->passthru(&op);
1535        if (ret)
1536                cmd->engine_error = cmd_q->cmd_error;
1537
1538        return ret;
1539}
1540
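/* Illustrative sketch (hypothetical caller, not taken from this file): a
 * client that has already DMA-mapped its buffers might queue a no-map
 * passthru copy roughly as follows, using only the fields consumed above;
 * the completion callback and error handling are elided.
 *
 *	struct ccp_cmd cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.engine = CCP_ENGINE_PASSTHRU;
 *	cmd.flags = CCP_CMD_PASSTHRU_NO_DMA_MAP;
 *	cmd.u.passthru_nomap.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
 *	cmd.u.passthru_nomap.src_dma = src_dma_addr;
 *	cmd.u.passthru_nomap.dst_dma = dst_dma_addr;
 *	cmd.u.passthru_nomap.src_len = len;
 *	cmd.u.passthru_nomap.final = 1;
 *	ret = ccp_enqueue_cmd(&cmd);
 */
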
1541#if RHEL_SUPPORT_CCP_CRYPTO
1542static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1543{
1544        struct ccp_ecc_engine *ecc = &cmd->u.ecc;
1545        struct ccp_dm_workarea src, dst;
1546        struct ccp_op op;
1547        int ret;
1548        u8 *save;
1549
1550        if (!ecc->u.mm.operand_1 ||
1551            (ecc->u.mm.operand_1_len > CCP_ECC_MODULUS_BYTES))
1552                return -EINVAL;
1553
1554        if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT)
1555                if (!ecc->u.mm.operand_2 ||
1556                    (ecc->u.mm.operand_2_len > CCP_ECC_MODULUS_BYTES))
1557                        return -EINVAL;
1558
1559        if (!ecc->u.mm.result ||
1560            (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES))
1561                return -EINVAL;
1562
1563        memset(&op, 0, sizeof(op));
1564        op.cmd_q = cmd_q;
1565        op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
1566
1567        /* Concatenate the modulus and the operands. Both the modulus and
1568         * the operands must be in little-endian format.  Since the input
1569         * is in big-endian format, it must be converted and placed in a
1570         * fixed-length buffer.
1571         */
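        /* The source buffer is laid out as consecutive CCP_ECC_OPERAND_SIZE
         * slots: [ modulus | operand_1 | operand_2 ], with operand_2
         * omitted for the modular-inverse (MINV) function.
         */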
1572        ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
1573                                   DMA_TO_DEVICE);
1574        if (ret)
1575                return ret;
1576
1577        /* Save the workarea address since it is updated in order to perform
1578         * the concatenation.
1579         */
1580        save = src.address;
1581
1582        /* Copy the ECC modulus */
1583        ret = ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
1584                                      CCP_ECC_OPERAND_SIZE, false);
1585        if (ret)
1586                goto e_src;
1587        src.address += CCP_ECC_OPERAND_SIZE;
1588
1589        /* Copy the first operand */
1590        ret = ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_1,
1591                                      ecc->u.mm.operand_1_len,
1592                                      CCP_ECC_OPERAND_SIZE, false);
1593        if (ret)
1594                goto e_src;
1595        src.address += CCP_ECC_OPERAND_SIZE;
1596
1597        if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
1598                /* Copy the second operand */
1599                ret = ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_2,
1600                                              ecc->u.mm.operand_2_len,
1601                                              CCP_ECC_OPERAND_SIZE, false);
1602                if (ret)
1603                        goto e_src;
1604                src.address += CCP_ECC_OPERAND_SIZE;
1605        }
1606
1607        /* Restore the workarea address */
1608        src.address = save;
1609
1610        /* Prepare the output area for the operation */
1611        ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
1612                                   DMA_FROM_DEVICE);
1613        if (ret)
1614                goto e_src;
1615
1616        op.soc = 1;
1617        op.src.u.dma.address = src.dma.address;
1618        op.src.u.dma.offset = 0;
1619        op.src.u.dma.length = src.length;
1620        op.dst.u.dma.address = dst.dma.address;
1621        op.dst.u.dma.offset = 0;
1622        op.dst.u.dma.length = dst.length;
1623
1624        op.u.ecc.function = cmd->u.ecc.function;
1625
1626        ret = cmd_q->ccp->vdata->perform->ecc(&op);
1627        if (ret) {
1628                cmd->engine_error = cmd_q->cmd_error;
1629                goto e_dst;
1630        }
1631
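        /* The engine writes a little-endian status word into the output
         * buffer at CCP_ECC_RESULT_OFFSET; the operation only produced
         * valid data if the CCP_ECC_RESULT_SUCCESS bit is set.
         */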
1632        ecc->ecc_result = le16_to_cpup(
1633                (const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
1634        if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
1635                ret = -EIO;
1636                goto e_dst;
1637        }
1638
1639        /* Save the ECC result */
1640        ccp_reverse_get_dm_area(&dst, ecc->u.mm.result, CCP_ECC_MODULUS_BYTES);
1641
1642e_dst:
1643        ccp_dm_free(&dst);
1644
1645e_src:
1646        ccp_dm_free(&src);
1647
1648        return ret;
1649}
1650
1651static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1652{
1653        struct ccp_ecc_engine *ecc = &cmd->u.ecc;
1654        struct ccp_dm_workarea src, dst;
1655        struct ccp_op op;
1656        int ret;
1657        u8 *save;
1658
1659        if (!ecc->u.pm.point_1.x ||
1660            (ecc->u.pm.point_1.x_len > CCP_ECC_MODULUS_BYTES) ||
1661            !ecc->u.pm.point_1.y ||
1662            (ecc->u.pm.point_1.y_len > CCP_ECC_MODULUS_BYTES))
1663                return -EINVAL;
1664
1665        if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
1666                if (!ecc->u.pm.point_2.x ||
1667                    (ecc->u.pm.point_2.x_len > CCP_ECC_MODULUS_BYTES) ||
1668                    !ecc->u.pm.point_2.y ||
1669                    (ecc->u.pm.point_2.y_len > CCP_ECC_MODULUS_BYTES))
1670                        return -EINVAL;
1671        } else {
1672                if (!ecc->u.pm.domain_a ||
1673                    (ecc->u.pm.domain_a_len > CCP_ECC_MODULUS_BYTES))
1674                        return -EINVAL;
1675
1676                if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT)
1677                        if (!ecc->u.pm.scalar ||
1678                            (ecc->u.pm.scalar_len > CCP_ECC_MODULUS_BYTES))
1679                                return -EINVAL;
1680        }
1681
1682        if (!ecc->u.pm.result.x ||
1683            (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) ||
1684            !ecc->u.pm.result.y ||
1685            (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES))
1686                return -EINVAL;
1687
1688        memset(&op, 0, sizeof(op));
1689        op.cmd_q = cmd_q;
1690        op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
1691
1692        /* Concatenate the modulus and the operands. Both the modulus and
1693         * the operands must be in little-endian format.  Since the input
1694         * is in big-endian format, it must be converted and placed in a
1695         * fixed-length buffer.
1696         */
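        /* The source buffer is laid out as consecutive CCP_ECC_OPERAND_SIZE
         * slots: the modulus, then point_1 as (x, y, z = 1), followed by
         * either point_2 as (x, y, z = 1) for point add, or the domain "a"
         * parameter (plus the scalar for point multiply).
         */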
1697        ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
1698                                   DMA_TO_DEVICE);
1699        if (ret)
1700                return ret;
1701
1702        /* Save the workarea address since it is updated in order to perform
1703         * the concatenation.
1704         */
1705        save = src.address;
1706
1707        /* Copy the ECC modulus */
1708        ret = ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
1709                                      CCP_ECC_OPERAND_SIZE, false);
1710        if (ret)
1711                goto e_src;
1712        src.address += CCP_ECC_OPERAND_SIZE;
1713
1714        /* Copy the first point X and Y coordinate */
1715        ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.x,
1716                                      ecc->u.pm.point_1.x_len,
1717                                      CCP_ECC_OPERAND_SIZE, false);
1718        if (ret)
1719                goto e_src;
1720        src.address += CCP_ECC_OPERAND_SIZE;
1721        ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.y,
1722                                      ecc->u.pm.point_1.y_len,
1723                                      CCP_ECC_OPERAND_SIZE, false);
1724        if (ret)
1725                goto e_src;
1726        src.address += CCP_ECC_OPERAND_SIZE;
1727
1728        /* Set the first point Z coordinate to 1 */
1729        *src.address = 0x01;
1730        src.address += CCP_ECC_OPERAND_SIZE;
1731
1732        if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
1733                /* Copy the second point X and Y coordinate */
1734                ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.x,
1735                                              ecc->u.pm.point_2.x_len,
1736                                              CCP_ECC_OPERAND_SIZE, false);
1737                if (ret)
1738                        goto e_src;
1739                src.address += CCP_ECC_OPERAND_SIZE;
1740                ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.y,
1741                                              ecc->u.pm.point_2.y_len,
1742                                              CCP_ECC_OPERAND_SIZE, false);
1743                if (ret)
1744                        goto e_src;
1745                src.address += CCP_ECC_OPERAND_SIZE;
1746
1747                /* Set the second point Z coordinate to 1 */
1748                *src.address = 0x01;
1749                src.address += CCP_ECC_OPERAND_SIZE;
1750        } else {
1751                /* Copy the Domain "a" parameter */
1752                ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.domain_a,
1753                                              ecc->u.pm.domain_a_len,
1754                                              CCP_ECC_OPERAND_SIZE, false);
1755                if (ret)
1756                        goto e_src;
1757                src.address += CCP_ECC_OPERAND_SIZE;
1758
1759                if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) {
1760                        /* Copy the scalar value */
1761                        ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.scalar,
1762                                                      ecc->u.pm.scalar_len,
1763                                                      CCP_ECC_OPERAND_SIZE,
1764                                                      false);
1765                        if (ret)
1766                                goto e_src;
1767                        src.address += CCP_ECC_OPERAND_SIZE;
1768                }
1769        }
1770
1771        /* Restore the workarea address */
1772        src.address = save;
1773
1774        /* Prepare the output area for the operation */
1775        ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
1776                                   DMA_FROM_DEVICE);
1777        if (ret)
1778                goto e_src;
1779
1780        op.soc = 1;
1781        op.src.u.dma.address = src.dma.address;
1782        op.src.u.dma.offset = 0;
1783        op.src.u.dma.length = src.length;
1784        op.dst.u.dma.address = dst.dma.address;
1785        op.dst.u.dma.offset = 0;
1786        op.dst.u.dma.length = dst.length;
1787
1788        op.u.ecc.function = cmd->u.ecc.function;
1789
1790        ret = cmd_q->ccp->vdata->perform->ecc(&op);
1791        if (ret) {
1792                cmd->engine_error = cmd_q->cmd_error;
1793                goto e_dst;
1794        }
1795
1796        ecc->ecc_result = le16_to_cpup(
1797                (const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
1798        if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
1799                ret = -EIO;
1800                goto e_dst;
1801        }
1802
1803        /* Save the workarea address since it is updated as we walk through
1804         * to copy the point math result.
1805         */
1806        save = dst.address;
1807
1808        /* Save the ECC result X and Y coordinates */
1809        ccp_reverse_get_dm_area(&dst, ecc->u.pm.result.x,
1810                                CCP_ECC_MODULUS_BYTES);
1811        dst.address += CCP_ECC_OUTPUT_SIZE;
1812        ccp_reverse_get_dm_area(&dst, ecc->u.pm.result.y,
1813                                CCP_ECC_MODULUS_BYTES);
1814        dst.address += CCP_ECC_OUTPUT_SIZE;
1815
1816        /* Restore the workarea address */
1817        dst.address = save;
1818
1819e_dst:
1820        ccp_dm_free(&dst);
1821
1822e_src:
1823        ccp_dm_free(&src);
1824
1825        return ret;
1826}
1827
1828static int ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1829{
1830        struct ccp_ecc_engine *ecc = &cmd->u.ecc;
1831
1832        ecc->ecc_result = 0;
1833
1834        if (!ecc->mod ||
1835            (ecc->mod_len > CCP_ECC_MODULUS_BYTES))
1836                return -EINVAL;
1837
1838        switch (ecc->function) {
1839        case CCP_ECC_FUNCTION_MMUL_384BIT:
1840        case CCP_ECC_FUNCTION_MADD_384BIT:
1841        case CCP_ECC_FUNCTION_MINV_384BIT:
1842                return ccp_run_ecc_mm_cmd(cmd_q, cmd);
1843
1844        case CCP_ECC_FUNCTION_PADD_384BIT:
1845        case CCP_ECC_FUNCTION_PMUL_384BIT:
1846        case CCP_ECC_FUNCTION_PDBL_384BIT:
1847                return ccp_run_ecc_pm_cmd(cmd_q, cmd);
1848
1849        default:
1850                return -EINVAL;
1851        }
1852}
1853#endif /* RHEL_SUPPORT_CCP_CRYPTO */
1854
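/* Dispatch a command to the engine-specific handler on the given queue.
 * Engines compiled out when RHEL_SUPPORT_CCP_CRYPTO is 0 fall through to
 * the default case and fail with -EINVAL.
 */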
1855int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
1856{
1857        int ret;
1858
1859        cmd->engine_error = 0;
1860        cmd_q->cmd_error = 0;
1861        cmd_q->int_rcvd = 0;
1862        cmd_q->free_slots = cmd_q->ccp->vdata->perform->get_free_slots(cmd_q);
1863
1864        switch (cmd->engine) {
1865#if RHEL_SUPPORT_CCP_CRYPTO
1866        case CCP_ENGINE_AES:
1867                ret = ccp_run_aes_cmd(cmd_q, cmd);
1868                break;
1869        case CCP_ENGINE_XTS_AES_128:
1870                ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
1871                break;
1872        case CCP_ENGINE_SHA:
1873                ret = ccp_run_sha_cmd(cmd_q, cmd);
1874                break;
1875        case CCP_ENGINE_RSA:
1876                ret = ccp_run_rsa_cmd(cmd_q, cmd);
1877                break;
1878#endif /* RHEL_SUPPORT_CCP_CRYPTO */
1879        case CCP_ENGINE_PASSTHRU:
1880                if (cmd->flags & CCP_CMD_PASSTHRU_NO_DMA_MAP)
1881                        ret = ccp_run_passthru_nomap_cmd(cmd_q, cmd);
1882                else
1883                        ret = ccp_run_passthru_cmd(cmd_q, cmd);
1884                break;
1885#if RHEL_SUPPORT_CCP_CRYPTO
1886        case CCP_ENGINE_ECC:
1887                ret = ccp_run_ecc_cmd(cmd_q, cmd);
1888                break;
1889#endif /* RHEL_SUPPORT_CCP_CRYPTO */
1890        default:
1891                ret = -EINVAL;
1892        }
1893
1894        return ret;
1895}
1896