linux/drivers/crypto/keembay/keembay-ocs-hcu-core.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel Keem Bay OCS HCU Crypto Driver.
 *
 * Copyright (C) 2018-2020 Intel Corporation
 */

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_device.h>

#include <crypto/engine.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha2.h>
#include <crypto/sm3.h>
#include <crypto/hmac.h>
#include <crypto/internal/hash.h>

#include "ocs-hcu.h"

#define DRV_NAME        "keembay-ocs-hcu"

/* Flag marking a final request. */
#define REQ_FINAL                       BIT(0)
/* Flag marking a HMAC request. */
#define REQ_FLAGS_HMAC                  BIT(1)
/* Flag set when HW HMAC is being used. */
#define REQ_FLAGS_HMAC_HW               BIT(2)
/* Flag set when SW HMAC is being used. */
#define REQ_FLAGS_HMAC_SW               BIT(3)

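/*
 * Note: REQ_FLAGS_HMAC_HW and REQ_FLAGS_HMAC_SW are mutually exclusive
 * refinements of REQ_FLAGS_HMAC. Any update() on a HMAC request forces the
 * SW-assisted path (see kmb_ocs_hcu_update()), while HW HMAC is selected
 * only at finalization time, and only if no data was processed yet (see
 * kmb_ocs_hcu_fin_common()).
 */
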
/**
 * struct ocs_hcu_ctx - OCS HCU Transform context.
 * @engine_ctx:  Crypto Engine context.
 * @hcu_dev:     The OCS HCU device used by the transformation.
 * @key:         The key (used only for HMAC transformations).
 * @key_len:     The length of the key.
 * @is_sm3_tfm:  Whether or not this is an SM3 transformation.
 * @is_hmac_tfm: Whether or not this is a HMAC transformation.
 */
struct ocs_hcu_ctx {
        struct crypto_engine_ctx engine_ctx;
        struct ocs_hcu_dev *hcu_dev;
        u8 key[SHA512_BLOCK_SIZE];
        size_t key_len;
        bool is_sm3_tfm;
        bool is_hmac_tfm;
};

/**
 * struct ocs_hcu_rctx - Context for the request.
 * @hcu_dev:        OCS HCU device to be used to service the request.
 * @flags:          Flags tracking request status.
 * @algo:           Algorithm to use for the request.
 * @blk_sz:         Block size of the transformation / request.
 * @dig_sz:         Digest size of the transformation / request.
 * @dma_list:       OCS DMA linked list.
 * @hash_ctx:       OCS HCU hashing context.
 * @buffer:         Buffer to store a partial block of data and SW HMAC
 *                  artifacts (ipad, opad, etc.).
 * @buf_cnt:        Number of bytes currently stored in the buffer.
 * @buf_dma_addr:   The DMA address of @buffer (when mapped).
 * @buf_dma_count:  The number of bytes in @buffer currently DMA-mapped.
 * @sg:             Head of the scatterlist entries containing data.
 * @sg_data_total:  Total data in the SG list at any time.
 * @sg_data_offset: Offset into the data of the current individual SG node.
 * @sg_dma_nents:   Number of sg entries mapped in dma_list.
 */
struct ocs_hcu_rctx {
        struct ocs_hcu_dev      *hcu_dev;
        u32                     flags;
        enum ocs_hcu_algo       algo;
        size_t                  blk_sz;
        size_t                  dig_sz;
        struct ocs_hcu_dma_list *dma_list;
        struct ocs_hcu_hash_ctx hash_ctx;
        /*
         * Buffer is double the block size because we need space for SW HMAC
         * artifacts, i.e.:
         * - ipad (1 block) + a possible partial block of data.
         * - opad (1 block) + digest of H(k ^ ipad || m)
         */
        u8                      buffer[2 * SHA512_BLOCK_SIZE];
        size_t                  buf_cnt;
        dma_addr_t              buf_dma_addr;
        size_t                  buf_dma_count;
        struct scatterlist      *sg;
        unsigned int            sg_data_total;
        unsigned int            sg_data_offset;
        unsigned int            sg_dma_nents;
};
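
/*
 * Worked example of the @buffer layout during SW HMAC finalization, assuming
 * hmac(sha256) (blk_sz = 64, dig_sz = 32):
 *
 *   buffer[0 .. 63]  = key ^ opad
 *   buffer[64 .. 95] = H(key ^ ipad || m)  (the just-computed inner digest)
 *
 * The final HMAC is then H(buffer[0 .. 95]), computed via ocs_hcu_digest()
 * in kmb_ocs_hcu_do_one_request().
 */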

/**
 * struct ocs_hcu_drv - Driver data
 * @dev_list:   The list of HCU devices.
 * @lock:       The lock protecting dev_list.
 */
struct ocs_hcu_drv {
        struct list_head dev_list;
        spinlock_t lock; /* Protects dev_list. */
};

static struct ocs_hcu_drv ocs_hcu = {
        .dev_list = LIST_HEAD_INIT(ocs_hcu.dev_list),
        .lock = __SPIN_LOCK_UNLOCKED(ocs_hcu.lock),
};

/*
 * Return the total amount of data in the request; that is: the data in the
 * request buffer + the data in the sg list.
 */
static inline unsigned int kmb_get_total_data(struct ocs_hcu_rctx *rctx)
{
        return rctx->sg_data_total + rctx->buf_cnt;
}

/* Move remaining content of scatter-gather list to context buffer. */
static int flush_sg_to_ocs_buffer(struct ocs_hcu_rctx *rctx)
{
        size_t count;

        if (rctx->sg_data_total > (sizeof(rctx->buffer) - rctx->buf_cnt)) {
                WARN(1, "%s: sg data does not fit in buffer\n", __func__);
                return -EINVAL;
        }

        while (rctx->sg_data_total) {
                if (!rctx->sg) {
                        WARN(1, "%s: unexpected NULL sg\n", __func__);
                        return -EINVAL;
                }
                /*
                 * If current sg has been fully processed, skip to the next
                 * one.
                 */
                if (rctx->sg_data_offset == rctx->sg->length) {
                        rctx->sg = sg_next(rctx->sg);
                        rctx->sg_data_offset = 0;
                        continue;
                }
                /*
                 * Determine the amount of data to copy from this node: the
                 * minimum of the length left in the sg node and the total
                 * data left in the request.
                 */
                count = min(rctx->sg->length - rctx->sg_data_offset,
                            rctx->sg_data_total);
                /* Copy from scatter-list entry to context buffer. */
                scatterwalk_map_and_copy(&rctx->buffer[rctx->buf_cnt],
                                         rctx->sg, rctx->sg_data_offset,
                                         count, 0);

                rctx->sg_data_offset += count;
                rctx->sg_data_total -= count;
                rctx->buf_cnt += count;
        }

        return 0;
}

static struct ocs_hcu_dev *kmb_ocs_hcu_find_dev(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ocs_hcu_ctx *tctx = crypto_ahash_ctx(tfm);

        /* If the HCU device for the request was previously set, return it. */
        if (tctx->hcu_dev)
                return tctx->hcu_dev;

        /*
         * Otherwise, get the first HCU device available (there should be one
         * and only one device).
         */
        spin_lock_bh(&ocs_hcu.lock);
        tctx->hcu_dev = list_first_entry_or_null(&ocs_hcu.dev_list,
                                                 struct ocs_hcu_dev,
                                                 list);
        spin_unlock_bh(&ocs_hcu.lock);

        return tctx->hcu_dev;
}

/* Free OCS DMA linked list and DMA-able context buffer. */
static void kmb_ocs_hcu_dma_cleanup(struct ahash_request *req,
                                    struct ocs_hcu_rctx *rctx)
{
        struct ocs_hcu_dev *hcu_dev = rctx->hcu_dev;
        struct device *dev = hcu_dev->dev;

        /* Unmap rctx->buffer (if mapped). */
        if (rctx->buf_dma_count) {
                dma_unmap_single(dev, rctx->buf_dma_addr, rctx->buf_dma_count,
                                 DMA_TO_DEVICE);
                rctx->buf_dma_count = 0;
        }

        /* Unmap req->src (if mapped). */
        if (rctx->sg_dma_nents) {
                dma_unmap_sg(dev, req->src, rctx->sg_dma_nents, DMA_TO_DEVICE);
                rctx->sg_dma_nents = 0;
        }

        /* Free dma_list (if allocated). */
        if (rctx->dma_list) {
                ocs_hcu_dma_list_free(hcu_dev, rctx->dma_list);
                rctx->dma_list = NULL;
        }
}

/*
 * Prepare for DMA operation:
 * - DMA-map request context buffer (if needed)
 * - DMA-map SG list (only the entries to be processed, see note below)
 * - Allocate OCS HCU DMA linked list (number of elements = SG entries to
 *   process + context buffer (if not empty)).
 * - Add DMA-mapped request context buffer to OCS HCU DMA list.
 * - Add SG entries to DMA list.
 *
 * Note: if this is a final request, we process all the data in the SG list,
 * otherwise we can only process up to the maximum amount of block-aligned data
 * (the remainder will be put into the context buffer and processed in the next
 * request).
 */
static int kmb_ocs_dma_prepare(struct ahash_request *req)
{
        struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
        struct device *dev = rctx->hcu_dev->dev;
        unsigned int remainder = 0;
        unsigned int total;
        size_t nents;
        size_t count;
        int rc;
        int i;

        /* This function should be called only when there is data to process. */
        total = kmb_get_total_data(rctx);
        if (!total)
                return -EINVAL;

        /*
         * If this is not a final DMA (terminated DMA), the data passed to the
         * HCU must be aligned to the block size; compute the remainder data to
         * be processed in the next request.
         */
        if (!(rctx->flags & REQ_FINAL))
                remainder = total % rctx->blk_sz;

        /* Determine the number of scatter gather list entries to process. */
        nents = sg_nents_for_len(req->src, rctx->sg_data_total - remainder);

        /* If there are entries to process, map them. */
        if (nents) {
                rctx->sg_dma_nents = dma_map_sg(dev, req->src, nents,
                                                DMA_TO_DEVICE);
                if (!rctx->sg_dma_nents) {
                        dev_err(dev, "Failed to MAP SG\n");
                        rc = -ENOMEM;
                        goto cleanup;
                }
                /*
                 * The value returned by dma_map_sg() can be < nents; so update
                 * nents accordingly.
                 */
                nents = rctx->sg_dma_nents;
        }

        /*
         * If the context buffer is not empty, map it and add an extra DMA
         * entry for it.
         */
        if (rctx->buf_cnt) {
                rctx->buf_dma_addr = dma_map_single(dev, rctx->buffer,
                                                    rctx->buf_cnt,
                                                    DMA_TO_DEVICE);
                if (dma_mapping_error(dev, rctx->buf_dma_addr)) {
                        dev_err(dev, "Failed to map request context buffer\n");
                        rc = -ENOMEM;
                        goto cleanup;
                }
                rctx->buf_dma_count = rctx->buf_cnt;
                /* Increase number of dma entries. */
                nents++;
        }

        /* Allocate OCS HCU DMA list. */
        rctx->dma_list = ocs_hcu_dma_list_alloc(rctx->hcu_dev, nents);
        if (!rctx->dma_list) {
                rc = -ENOMEM;
                goto cleanup;
        }

        /* Add request context buffer (if previously DMA-mapped). */
        if (rctx->buf_dma_count) {
                rc = ocs_hcu_dma_list_add_tail(rctx->hcu_dev, rctx->dma_list,
                                               rctx->buf_dma_addr,
                                               rctx->buf_dma_count);
                if (rc)
                        goto cleanup;
        }

        /* Add the SG nodes to be processed to the DMA linked list. */
        for_each_sg(req->src, rctx->sg, rctx->sg_dma_nents, i) {
                /*
                 * The number of bytes to add to the list entry is the minimum
                 * between:
                 * - The DMA length of the SG entry.
                 * - The data left to be processed.
                 */
                count = min(rctx->sg_data_total - remainder,
                            sg_dma_len(rctx->sg) - rctx->sg_data_offset);
                /*
                 * Do not create a zero length DMA descriptor. Check in case of
                 * zero length SG node.
                 */
                if (count == 0)
                        continue;
                /* Add sg to HCU DMA list. */
                rc = ocs_hcu_dma_list_add_tail(rctx->hcu_dev,
                                               rctx->dma_list,
                                               rctx->sg->dma_address,
                                               count);
                if (rc)
                        goto cleanup;

                /* Update amount of data remaining in SG list. */
                rctx->sg_data_total -= count;

                /*
                 * If the remaining data is equal to remainder (note: the
                 * 'less than' case should never happen in practice), we are
                 * done: update the offset and exit the loop.
                 */
                if (rctx->sg_data_total <= remainder) {
                        WARN_ON(rctx->sg_data_total < remainder);
                        rctx->sg_data_offset += count;
                        break;
                }

                /*
                 * If we get here, it is because we need to process the next
                 * sg in the list; set the offset within the sg to 0.
                 */
                rctx->sg_data_offset = 0;
        }

        return 0;
cleanup:
        dev_err(dev, "Failed to prepare DMA.\n");
        kmb_ocs_hcu_dma_cleanup(req, rctx);

        return rc;
}
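
/*
 * Worked example for kmb_ocs_dma_prepare() on a non-final request, assuming
 * blk_sz = 64, buf_cnt = 10 and 150 bytes of new sg data:
 *
 *   total     = 10 + 150 = 160
 *   remainder = 160 % 64 = 32
 *
 * So 128 block-aligned bytes are DMA-ed to the HCU (10 from the context
 * buffer + 118 from the SG list), while the trailing 32 bytes of sg data are
 * later copied back into the context buffer by flush_sg_to_ocs_buffer() and
 * processed with the next request.
 */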

static void kmb_ocs_hcu_secure_cleanup(struct ahash_request *req)
{
        struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);

        /* Clear buffer of any data. */
        memzero_explicit(rctx->buffer, sizeof(rctx->buffer));
}

static int kmb_ocs_hcu_handle_queue(struct ahash_request *req)
{
        struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);

        if (!hcu_dev)
                return -ENOENT;

        return crypto_transfer_hash_request_to_engine(hcu_dev->engine, req);
}

static int prepare_ipad(struct ahash_request *req)
{
        struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
        int i;

        WARN(rctx->buf_cnt, "%s: Context buffer is not empty\n", __func__);
        WARN(!(rctx->flags & REQ_FLAGS_HMAC_SW),
             "%s: HMAC_SW flag is not set\n", __func__);
        /*
         * Key length must be equal to block size. If the key is shorter, we
         * pad it with zeros (note: the key cannot be longer, since longer
         * keys are hashed by kmb_ocs_hcu_setkey()).
         */
        if (ctx->key_len > rctx->blk_sz) {
                WARN(1, "%s: Invalid key length in tfm context\n", __func__);
                return -EINVAL;
        }
        memzero_explicit(&ctx->key[ctx->key_len],
                         rctx->blk_sz - ctx->key_len);
        ctx->key_len = rctx->blk_sz;
        /*
         * Prepare IPAD for HMAC. Only done for first block.
         * HMAC(k,m) = H(k ^ opad || H(k ^ ipad || m))
         * k ^ ipad will be first hashed block.
         * k ^ opad will be calculated in the final request.
         * Only needed if not using HW HMAC.
         */
        for (i = 0; i < rctx->blk_sz; i++)
                rctx->buffer[i] = ctx->key[i] ^ HMAC_IPAD_VALUE;
        rctx->buf_cnt = rctx->blk_sz;

        return 0;
}
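
/*
 * Sketch of the SW-assisted HMAC flow implemented by prepare_ipad() and
 * kmb_ocs_hcu_do_one_request() (see the HMAC definition above):
 *
 *   1. prepare_ipad():   buffer = k ^ ipad  (becomes the first hashed block)
 *   2. update()/finup(): inner  = H(k ^ ipad || m)
 *   3. final step:       buffer = (k ^ opad) || inner
 *                        result = H(buffer) = HMAC(k, m)
 */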

static int kmb_ocs_hcu_do_one_request(struct crypto_engine *engine, void *areq)
{
        struct ahash_request *req = container_of(areq, struct ahash_request,
                                                 base);
        struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
        struct ocs_hcu_ctx *tctx = crypto_ahash_ctx(tfm);
        int rc;
        int i;

        if (!hcu_dev) {
                rc = -ENOENT;
                goto error;
        }

        /*
         * If hardware HMAC flag is set, perform HMAC in hardware.
         *
         * NOTE: this flag implies REQ_FINAL && kmb_get_total_data(rctx)
         */
        if (rctx->flags & REQ_FLAGS_HMAC_HW) {
                /* Map input data into the HCU DMA linked list. */
                rc = kmb_ocs_dma_prepare(req);
                if (rc)
                        goto error;

                rc = ocs_hcu_hmac(hcu_dev, rctx->algo, tctx->key, tctx->key_len,
                                  rctx->dma_list, req->result, rctx->dig_sz);

                /* Unmap data and free DMA list regardless of return code. */
                kmb_ocs_hcu_dma_cleanup(req, rctx);

                /* Process previous return code. */
                if (rc)
                        goto error;

                goto done;
        }

        /* Handle update request case. */
        if (!(rctx->flags & REQ_FINAL)) {
                /* Update should always have input data. */
                if (!kmb_get_total_data(rctx))
                        return -EINVAL;

                /* Map input data into the HCU DMA linked list. */
                rc = kmb_ocs_dma_prepare(req);
                if (rc)
                        goto error;

                /* Do hashing step. */
                rc = ocs_hcu_hash_update(hcu_dev, &rctx->hash_ctx,
                                         rctx->dma_list);

                /* Unmap data and free DMA list regardless of return code. */
                kmb_ocs_hcu_dma_cleanup(req, rctx);

                /* Process previous return code. */
                if (rc)
                        goto error;

                /*
                 * Reset request buffer count (data in the buffer was just
                 * processed).
                 */
                rctx->buf_cnt = 0;
                /*
                 * Move remaining sg data into the request buffer, so that it
                 * will be processed during the next request.
                 *
                 * NOTE: we have remaining data if kmb_get_total_data() was not
                 * a multiple of block size.
                 */
                rc = flush_sg_to_ocs_buffer(rctx);
                if (rc)
                        goto error;

                goto done;
        }

        /* If we get here, this is a final request. */

        /* If there is data to process, use finup. */
        if (kmb_get_total_data(rctx)) {
                /* Map input data into the HCU DMA linked list. */
                rc = kmb_ocs_dma_prepare(req);
                if (rc)
                        goto error;

                /* Do hashing step. */
                rc = ocs_hcu_hash_finup(hcu_dev, &rctx->hash_ctx,
                                        rctx->dma_list,
                                        req->result, rctx->dig_sz);
                /* Free DMA list regardless of return code. */
                kmb_ocs_hcu_dma_cleanup(req, rctx);

                /* Process previous return code. */
                if (rc)
                        goto error;

        } else { /* Otherwise (if we have no data), use final. */
                rc = ocs_hcu_hash_final(hcu_dev, &rctx->hash_ctx, req->result,
                                        rctx->dig_sz);
                if (rc)
                        goto error;
        }

        /*
         * If we are finalizing a SW HMAC request, we just computed the result
         * of: H(k ^ ipad || m).
         *
         * We now need to complete the HMAC calculation with the OPAD step,
         * that is, we need to compute H(k ^ opad || digest), where digest is
         * the digest we just obtained, i.e., H(k ^ ipad || m).
         */
        if (rctx->flags & REQ_FLAGS_HMAC_SW) {
                /*
                 * Compute k ^ opad and store it in the request buffer (which
                 * is not used anymore at this point).
                 * Note: key has been padded / hashed already (so keylen ==
                 * blksz).
                 */
                WARN_ON(tctx->key_len != rctx->blk_sz);
                for (i = 0; i < rctx->blk_sz; i++)
                        rctx->buffer[i] = tctx->key[i] ^ HMAC_OPAD_VALUE;
                /* Now append the digest to the rest of the buffer. */
                for (i = 0; i < rctx->dig_sz; i++)
                        rctx->buffer[rctx->blk_sz + i] = req->result[i];

                /* Now hash the buffer to obtain the final HMAC. */
                rc = ocs_hcu_digest(hcu_dev, rctx->algo, rctx->buffer,
                                    rctx->blk_sz + rctx->dig_sz, req->result,
                                    rctx->dig_sz);
                if (rc)
                        goto error;
        }

        /* Perform secure clean-up. */
        kmb_ocs_hcu_secure_cleanup(req);
done:
        crypto_finalize_hash_request(hcu_dev->engine, req, 0);

        return 0;

error:
        kmb_ocs_hcu_secure_cleanup(req);
        return rc;
}

static int kmb_ocs_hcu_init(struct ahash_request *req)
{
        struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
        struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);

        if (!hcu_dev)
                return -ENOENT;

        /* Initialize entire request context to zero. */
        memset(rctx, 0, sizeof(*rctx));

        rctx->hcu_dev = hcu_dev;
        rctx->dig_sz = crypto_ahash_digestsize(tfm);

        switch (rctx->dig_sz) {
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
        case SHA224_DIGEST_SIZE:
                rctx->blk_sz = SHA224_BLOCK_SIZE;
                rctx->algo = OCS_HCU_ALGO_SHA224;
                break;
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224 */
        case SHA256_DIGEST_SIZE:
                rctx->blk_sz = SHA256_BLOCK_SIZE;
                /*
                 * SHA256 and SM3 have the same digest size: use info from tfm
                 * context to find out which one we should use.
                 */
                rctx->algo = ctx->is_sm3_tfm ? OCS_HCU_ALGO_SM3 :
                                               OCS_HCU_ALGO_SHA256;
                break;
        case SHA384_DIGEST_SIZE:
                rctx->blk_sz = SHA384_BLOCK_SIZE;
                rctx->algo = OCS_HCU_ALGO_SHA384;
                break;
        case SHA512_DIGEST_SIZE:
                rctx->blk_sz = SHA512_BLOCK_SIZE;
                rctx->algo = OCS_HCU_ALGO_SHA512;
                break;
        default:
                return -EINVAL;
        }

        /* Initialize intermediate data. */
        ocs_hcu_hash_init(&rctx->hash_ctx, rctx->algo);

        /* If this is a HMAC request, set the HMAC flag. */
        if (ctx->is_hmac_tfm)
                rctx->flags |= REQ_FLAGS_HMAC;

        return 0;
}

static int kmb_ocs_hcu_update(struct ahash_request *req)
{
        struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
        int rc;

        if (!req->nbytes)
                return 0;

        rctx->sg_data_total = req->nbytes;
        rctx->sg_data_offset = 0;
        rctx->sg = req->src;

        /*
         * If we are doing HMAC, then we must use SW-assisted HMAC, since HW
         * HMAC does not support context switching (i.e., it can only be used
         * with finup() or digest()).
         */
        if (rctx->flags & REQ_FLAGS_HMAC &&
            !(rctx->flags & REQ_FLAGS_HMAC_SW)) {
                rctx->flags |= REQ_FLAGS_HMAC_SW;
                rc = prepare_ipad(req);
                if (rc)
                        return rc;
        }

        /*
         * If remaining sg_data fits into ctx buffer, just copy it there; we'll
         * process it at the next update() or final().
         */
        if (rctx->sg_data_total <= (sizeof(rctx->buffer) - rctx->buf_cnt))
                return flush_sg_to_ocs_buffer(rctx);

        return kmb_ocs_hcu_handle_queue(req);
}
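
/*
 * Example of the buffering behavior of kmb_ocs_hcu_update(): with an empty
 * context buffer (256 bytes of space), a 100-byte update is simply copied
 * into the buffer and returns without touching the hardware. Only when the
 * accumulated data no longer fits is the request queued to the crypto
 * engine, which then processes all block-aligned data and re-buffers the
 * remainder.
 */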

/* Common logic for kmb_ocs_hcu_final() and kmb_ocs_hcu_finup(). */
static int kmb_ocs_hcu_fin_common(struct ahash_request *req)
{
        struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
        int rc;

        rctx->flags |= REQ_FINAL;

        /*
         * If this is a HMAC request and, so far, we didn't have to switch to
         * SW HMAC, check if we can use HW HMAC.
         */
        if (rctx->flags & REQ_FLAGS_HMAC &&
            !(rctx->flags & REQ_FLAGS_HMAC_SW)) {
                /*
                 * If we are here, it means we have not processed any data so
                 * far, so we can use HW HMAC, but only if there is some data
                 * to process (since OCS HW HMAC does not support zero-length
                 * messages) and the key length is supported by the hardware
                 * (OCS HCU HW only supports key lengths <= 64 bytes); if HW
                 * HMAC cannot be used, fall back to SW-assisted HMAC.
                 */
                if (kmb_get_total_data(rctx) &&
                    ctx->key_len <= OCS_HCU_HW_KEY_LEN) {
                        rctx->flags |= REQ_FLAGS_HMAC_HW;
                } else {
                        rctx->flags |= REQ_FLAGS_HMAC_SW;
                        rc = prepare_ipad(req);
                        if (rc)
                                return rc;
                }
        }

        return kmb_ocs_hcu_handle_queue(req);
}
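
/*
 * Summary of the HMAC mode selection performed above:
 *
 *   - No prior update(), data to hash, key_len <= OCS_HCU_HW_KEY_LEN
 *                                     -> HW HMAC (single-shot).
 *   - Otherwise (zero-length message, long key, or any prior update())
 *                                     -> SW-assisted HMAC (ipad/opad).
 */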

static int kmb_ocs_hcu_final(struct ahash_request *req)
{
        struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);

        rctx->sg_data_total = 0;
        rctx->sg_data_offset = 0;
        rctx->sg = NULL;

        return kmb_ocs_hcu_fin_common(req);
}

static int kmb_ocs_hcu_finup(struct ahash_request *req)
{
        struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);

        rctx->sg_data_total = req->nbytes;
        rctx->sg_data_offset = 0;
        rctx->sg = req->src;

        return kmb_ocs_hcu_fin_common(req);
}

static int kmb_ocs_hcu_digest(struct ahash_request *req)
{
        int rc = 0;
        struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);

        if (!hcu_dev)
                return -ENOENT;

        rc = kmb_ocs_hcu_init(req);
        if (rc)
                return rc;

        rc = kmb_ocs_hcu_finup(req);

        return rc;
}

static int kmb_ocs_hcu_export(struct ahash_request *req, void *out)
{
        struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);

        /* Intermediate data is always stored and applied per request. */
        memcpy(out, rctx, sizeof(*rctx));

        return 0;
}

static int kmb_ocs_hcu_import(struct ahash_request *req, const void *in)
{
        struct ocs_hcu_rctx *rctx = ahash_request_ctx(req);

        /* Intermediate data is always stored and applied per request. */
        memcpy(rctx, in, sizeof(*rctx));

        return 0;
}

static int kmb_ocs_hcu_setkey(struct crypto_ahash *tfm, const u8 *key,
                              unsigned int keylen)
{
        unsigned int digestsize = crypto_ahash_digestsize(tfm);
        struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
        size_t blk_sz = crypto_ahash_blocksize(tfm);
        struct crypto_ahash *ahash_tfm;
        struct ahash_request *req;
        struct crypto_wait wait;
        struct scatterlist sg;
        const char *alg_name;
        int rc;

        /*
         * Key length must be equal to block size:
         * - If key is shorter, we are done for now (the key will be padded
         *   later on); this is to maximize the use of HW HMAC (which works
         *   only for keys <= 64 bytes).
         * - If key is longer, we hash it.
         */
        if (keylen <= blk_sz) {
                memcpy(ctx->key, key, keylen);
                ctx->key_len = keylen;
                return 0;
        }

        switch (digestsize) {
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
        case SHA224_DIGEST_SIZE:
                alg_name = "sha224-keembay-ocs";
                break;
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224 */
        case SHA256_DIGEST_SIZE:
                alg_name = ctx->is_sm3_tfm ? "sm3-keembay-ocs" :
                                             "sha256-keembay-ocs";
                break;
        case SHA384_DIGEST_SIZE:
                alg_name = "sha384-keembay-ocs";
                break;
        case SHA512_DIGEST_SIZE:
                alg_name = "sha512-keembay-ocs";
                break;
        default:
                return -EINVAL;
        }

        ahash_tfm = crypto_alloc_ahash(alg_name, 0, 0);
        if (IS_ERR(ahash_tfm))
                return PTR_ERR(ahash_tfm);

        req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
        if (!req) {
                rc = -ENOMEM;
                goto err_free_ahash;
        }

        crypto_init_wait(&wait);
        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   crypto_req_done, &wait);
        crypto_ahash_clear_flags(ahash_tfm, ~0);

        sg_init_one(&sg, key, keylen);
        ahash_request_set_crypt(req, &sg, ctx->key, keylen);

        rc = crypto_wait_req(crypto_ahash_digest(req), &wait);
        if (rc == 0)
                ctx->key_len = digestsize;

        ahash_request_free(req);
err_free_ahash:
        crypto_free_ahash(ahash_tfm);

        return rc;
}
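
/*
 * Example of the key handling above for hmac(sha256) (blk_sz = 64):
 *
 *   - A 20-byte key is stored as-is (key_len = 20) and zero-padded to 64
 *     bytes later, in prepare_ipad(), if the SW-assisted path is taken.
 *   - A 100-byte key exceeds the block size, so it is hashed with
 *     "sha256-keembay-ocs" and the 32-byte digest becomes the key.
 */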

/* Set request size and initialize tfm context. */
static void __cra_init(struct crypto_tfm *tfm, struct ocs_hcu_ctx *ctx)
{
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct ocs_hcu_rctx));

        /* Init context to 0. */
        memzero_explicit(ctx, sizeof(*ctx));
        /* Set engine ops. */
        ctx->engine_ctx.op.do_one_request = kmb_ocs_hcu_do_one_request;
}

static int kmb_ocs_hcu_sha_cra_init(struct crypto_tfm *tfm)
{
        struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);

        __cra_init(tfm, ctx);

        return 0;
}

static int kmb_ocs_hcu_sm3_cra_init(struct crypto_tfm *tfm)
{
        struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);

        __cra_init(tfm, ctx);

        ctx->is_sm3_tfm = true;

        return 0;
}

static int kmb_ocs_hcu_hmac_sm3_cra_init(struct crypto_tfm *tfm)
{
        struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);

        __cra_init(tfm, ctx);

        ctx->is_sm3_tfm = true;
        ctx->is_hmac_tfm = true;

        return 0;
}

static int kmb_ocs_hcu_hmac_cra_init(struct crypto_tfm *tfm)
{
        struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);

        __cra_init(tfm, ctx);

        ctx->is_hmac_tfm = true;

        return 0;
}

/* Function called when 'tfm' is de-initialized. */
static void kmb_ocs_hcu_hmac_cra_exit(struct crypto_tfm *tfm)
{
        struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);

        /* Clear the key. */
        memzero_explicit(ctx->key, sizeof(ctx->key));
}

static struct ahash_alg ocs_hcu_algs[] = {
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
{
        .init           = kmb_ocs_hcu_init,
        .update         = kmb_ocs_hcu_update,
        .final          = kmb_ocs_hcu_final,
        .finup          = kmb_ocs_hcu_finup,
        .digest         = kmb_ocs_hcu_digest,
        .export         = kmb_ocs_hcu_export,
        .import         = kmb_ocs_hcu_import,
        .halg = {
                .digestsize     = SHA224_DIGEST_SIZE,
                .statesize      = sizeof(struct ocs_hcu_rctx),
                .base   = {
                        .cra_name               = "sha224",
                        .cra_driver_name        = "sha224-keembay-ocs",
                        .cra_priority           = 255,
                        .cra_flags              = CRYPTO_ALG_ASYNC,
                        .cra_blocksize          = SHA224_BLOCK_SIZE,
                        .cra_ctxsize            = sizeof(struct ocs_hcu_ctx),
                        .cra_alignmask          = 0,
                        .cra_module             = THIS_MODULE,
                        .cra_init               = kmb_ocs_hcu_sha_cra_init,
                }
        }
},
{
        .init           = kmb_ocs_hcu_init,
        .update         = kmb_ocs_hcu_update,
        .final          = kmb_ocs_hcu_final,
        .finup          = kmb_ocs_hcu_finup,
        .digest         = kmb_ocs_hcu_digest,
        .export         = kmb_ocs_hcu_export,
        .import         = kmb_ocs_hcu_import,
        .setkey         = kmb_ocs_hcu_setkey,
        .halg = {
                .digestsize     = SHA224_DIGEST_SIZE,
                .statesize      = sizeof(struct ocs_hcu_rctx),
                .base   = {
                        .cra_name               = "hmac(sha224)",
                        .cra_driver_name        = "hmac-sha224-keembay-ocs",
                        .cra_priority           = 255,
                        .cra_flags              = CRYPTO_ALG_ASYNC,
                        .cra_blocksize          = SHA224_BLOCK_SIZE,
                        .cra_ctxsize            = sizeof(struct ocs_hcu_ctx),
                        .cra_alignmask          = 0,
                        .cra_module             = THIS_MODULE,
                        .cra_init               = kmb_ocs_hcu_hmac_cra_init,
                        .cra_exit               = kmb_ocs_hcu_hmac_cra_exit,
                }
        }
},
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224 */
{
        .init           = kmb_ocs_hcu_init,
        .update         = kmb_ocs_hcu_update,
        .final          = kmb_ocs_hcu_final,
        .finup          = kmb_ocs_hcu_finup,
        .digest         = kmb_ocs_hcu_digest,
        .export         = kmb_ocs_hcu_export,
        .import         = kmb_ocs_hcu_import,
        .halg = {
                .digestsize     = SHA256_DIGEST_SIZE,
                .statesize      = sizeof(struct ocs_hcu_rctx),
                .base   = {
                        .cra_name               = "sha256",
                        .cra_driver_name        = "sha256-keembay-ocs",
                        .cra_priority           = 255,
                        .cra_flags              = CRYPTO_ALG_ASYNC,
                        .cra_blocksize          = SHA256_BLOCK_SIZE,
                        .cra_ctxsize            = sizeof(struct ocs_hcu_ctx),
                        .cra_alignmask          = 0,
                        .cra_module             = THIS_MODULE,
                        .cra_init               = kmb_ocs_hcu_sha_cra_init,
                }
        }
},
{
        .init           = kmb_ocs_hcu_init,
        .update         = kmb_ocs_hcu_update,
        .final          = kmb_ocs_hcu_final,
        .finup          = kmb_ocs_hcu_finup,
        .digest         = kmb_ocs_hcu_digest,
        .export         = kmb_ocs_hcu_export,
        .import         = kmb_ocs_hcu_import,
        .setkey         = kmb_ocs_hcu_setkey,
        .halg = {
                .digestsize     = SHA256_DIGEST_SIZE,
                .statesize      = sizeof(struct ocs_hcu_rctx),
                .base   = {
                        .cra_name               = "hmac(sha256)",
                        .cra_driver_name        = "hmac-sha256-keembay-ocs",
                        .cra_priority           = 255,
                        .cra_flags              = CRYPTO_ALG_ASYNC,
                        .cra_blocksize          = SHA256_BLOCK_SIZE,
                        .cra_ctxsize            = sizeof(struct ocs_hcu_ctx),
                        .cra_alignmask          = 0,
                        .cra_module             = THIS_MODULE,
                        .cra_init               = kmb_ocs_hcu_hmac_cra_init,
                        .cra_exit               = kmb_ocs_hcu_hmac_cra_exit,
                }
        }
},
{
        .init           = kmb_ocs_hcu_init,
        .update         = kmb_ocs_hcu_update,
        .final          = kmb_ocs_hcu_final,
        .finup          = kmb_ocs_hcu_finup,
        .digest         = kmb_ocs_hcu_digest,
        .export         = kmb_ocs_hcu_export,
        .import         = kmb_ocs_hcu_import,
        .halg = {
                .digestsize     = SM3_DIGEST_SIZE,
                .statesize      = sizeof(struct ocs_hcu_rctx),
                .base   = {
                        .cra_name               = "sm3",
                        .cra_driver_name        = "sm3-keembay-ocs",
                        .cra_priority           = 255,
                        .cra_flags              = CRYPTO_ALG_ASYNC,
                        .cra_blocksize          = SM3_BLOCK_SIZE,
                        .cra_ctxsize            = sizeof(struct ocs_hcu_ctx),
                        .cra_alignmask          = 0,
                        .cra_module             = THIS_MODULE,
                        .cra_init               = kmb_ocs_hcu_sm3_cra_init,
                }
        }
},
{
        .init           = kmb_ocs_hcu_init,
        .update         = kmb_ocs_hcu_update,
        .final          = kmb_ocs_hcu_final,
        .finup          = kmb_ocs_hcu_finup,
        .digest         = kmb_ocs_hcu_digest,
        .export         = kmb_ocs_hcu_export,
        .import         = kmb_ocs_hcu_import,
        .setkey         = kmb_ocs_hcu_setkey,
        .halg = {
                .digestsize     = SM3_DIGEST_SIZE,
                .statesize      = sizeof(struct ocs_hcu_rctx),
                .base   = {
                        .cra_name               = "hmac(sm3)",
                        .cra_driver_name        = "hmac-sm3-keembay-ocs",
                        .cra_priority           = 255,
                        .cra_flags              = CRYPTO_ALG_ASYNC,
                        .cra_blocksize          = SM3_BLOCK_SIZE,
                        .cra_ctxsize            = sizeof(struct ocs_hcu_ctx),
                        .cra_alignmask          = 0,
                        .cra_module             = THIS_MODULE,
                        .cra_init               = kmb_ocs_hcu_hmac_sm3_cra_init,
                        .cra_exit               = kmb_ocs_hcu_hmac_cra_exit,
                }
        }
},
{
        .init           = kmb_ocs_hcu_init,
        .update         = kmb_ocs_hcu_update,
        .final          = kmb_ocs_hcu_final,
        .finup          = kmb_ocs_hcu_finup,
        .digest         = kmb_ocs_hcu_digest,
        .export         = kmb_ocs_hcu_export,
        .import         = kmb_ocs_hcu_import,
        .halg = {
                .digestsize     = SHA384_DIGEST_SIZE,
                .statesize      = sizeof(struct ocs_hcu_rctx),
                .base   = {
                        .cra_name               = "sha384",
                        .cra_driver_name        = "sha384-keembay-ocs",
                        .cra_priority           = 255,
                        .cra_flags              = CRYPTO_ALG_ASYNC,
                        .cra_blocksize          = SHA384_BLOCK_SIZE,
                        .cra_ctxsize            = sizeof(struct ocs_hcu_ctx),
                        .cra_alignmask          = 0,
                        .cra_module             = THIS_MODULE,
                        .cra_init               = kmb_ocs_hcu_sha_cra_init,
                }
        }
},
{
        .init           = kmb_ocs_hcu_init,
        .update         = kmb_ocs_hcu_update,
        .final          = kmb_ocs_hcu_final,
        .finup          = kmb_ocs_hcu_finup,
        .digest         = kmb_ocs_hcu_digest,
        .export         = kmb_ocs_hcu_export,
        .import         = kmb_ocs_hcu_import,
        .setkey         = kmb_ocs_hcu_setkey,
        .halg = {
                .digestsize     = SHA384_DIGEST_SIZE,
                .statesize      = sizeof(struct ocs_hcu_rctx),
                .base   = {
                        .cra_name               = "hmac(sha384)",
                        .cra_driver_name        = "hmac-sha384-keembay-ocs",
                        .cra_priority           = 255,
                        .cra_flags              = CRYPTO_ALG_ASYNC,
                        .cra_blocksize          = SHA384_BLOCK_SIZE,
                        .cra_ctxsize            = sizeof(struct ocs_hcu_ctx),
                        .cra_alignmask          = 0,
                        .cra_module             = THIS_MODULE,
                        .cra_init               = kmb_ocs_hcu_hmac_cra_init,
                        .cra_exit               = kmb_ocs_hcu_hmac_cra_exit,
                }
        }
},
{
        .init           = kmb_ocs_hcu_init,
        .update         = kmb_ocs_hcu_update,
        .final          = kmb_ocs_hcu_final,
        .finup          = kmb_ocs_hcu_finup,
        .digest         = kmb_ocs_hcu_digest,
        .export         = kmb_ocs_hcu_export,
        .import         = kmb_ocs_hcu_import,
        .halg = {
                .digestsize     = SHA512_DIGEST_SIZE,
                .statesize      = sizeof(struct ocs_hcu_rctx),
                .base   = {
                        .cra_name               = "sha512",
                        .cra_driver_name        = "sha512-keembay-ocs",
                        .cra_priority           = 255,
                        .cra_flags              = CRYPTO_ALG_ASYNC,
                        .cra_blocksize          = SHA512_BLOCK_SIZE,
                        .cra_ctxsize            = sizeof(struct ocs_hcu_ctx),
                        .cra_alignmask          = 0,
                        .cra_module             = THIS_MODULE,
                        .cra_init               = kmb_ocs_hcu_sha_cra_init,
                }
        }
},
{
        .init           = kmb_ocs_hcu_init,
        .update         = kmb_ocs_hcu_update,
        .final          = kmb_ocs_hcu_final,
        .finup          = kmb_ocs_hcu_finup,
        .digest         = kmb_ocs_hcu_digest,
        .export         = kmb_ocs_hcu_export,
        .import         = kmb_ocs_hcu_import,
        .setkey         = kmb_ocs_hcu_setkey,
        .halg = {
                .digestsize     = SHA512_DIGEST_SIZE,
                .statesize      = sizeof(struct ocs_hcu_rctx),
                .base   = {
                        .cra_name               = "hmac(sha512)",
                        .cra_driver_name        = "hmac-sha512-keembay-ocs",
                        .cra_priority           = 255,
                        .cra_flags              = CRYPTO_ALG_ASYNC,
                        .cra_blocksize          = SHA512_BLOCK_SIZE,
                        .cra_ctxsize            = sizeof(struct ocs_hcu_ctx),
                        .cra_alignmask          = 0,
                        .cra_module             = THIS_MODULE,
                        .cra_init               = kmb_ocs_hcu_hmac_cra_init,
                        .cra_exit               = kmb_ocs_hcu_hmac_cra_exit,
                }
        }
},
};

/* Device tree driver match. */
static const struct of_device_id kmb_ocs_hcu_of_match[] = {
        {
                .compatible = "intel,keembay-ocs-hcu",
        },
        {}
};
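
/*
 * Purely illustrative (hypothetical) device tree node that would bind to
 * this driver; the actual reg/interrupt values come from the Keem Bay SoC
 * device tree:
 *
 *   hcu: crypto@<addr> {
 *           compatible = "intel,keembay-ocs-hcu";
 *           reg = <...>;
 *           interrupts = <...>;
 *   };
 */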

static int kmb_ocs_hcu_remove(struct platform_device *pdev)
{
        struct ocs_hcu_dev *hcu_dev;
        int rc;

        hcu_dev = platform_get_drvdata(pdev);
        if (!hcu_dev)
                return -ENODEV;

        crypto_unregister_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs));

        rc = crypto_engine_exit(hcu_dev->engine);

        spin_lock_bh(&ocs_hcu.lock);
        list_del(&hcu_dev->list);
        spin_unlock_bh(&ocs_hcu.lock);

        return rc;
}

static int kmb_ocs_hcu_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct ocs_hcu_dev *hcu_dev;
        struct resource *hcu_mem;
        int rc;

        hcu_dev = devm_kzalloc(dev, sizeof(*hcu_dev), GFP_KERNEL);
        if (!hcu_dev)
                return -ENOMEM;

        hcu_dev->dev = dev;

        platform_set_drvdata(pdev, hcu_dev);
        rc = dma_set_mask_and_coherent(&pdev->dev, OCS_HCU_DMA_BIT_MASK);
        if (rc)
                return rc;

        /* Get the memory address and remap. */
        hcu_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!hcu_mem) {
                dev_err(dev, "Could not retrieve io mem resource.\n");
                return -ENODEV;
        }

        hcu_dev->io_base = devm_ioremap_resource(dev, hcu_mem);
        if (IS_ERR(hcu_dev->io_base))
                return PTR_ERR(hcu_dev->io_base);

        init_completion(&hcu_dev->irq_done);

        /* Get and request IRQ. */
        hcu_dev->irq = platform_get_irq(pdev, 0);
        if (hcu_dev->irq < 0)
                return hcu_dev->irq;

        rc = devm_request_threaded_irq(&pdev->dev, hcu_dev->irq,
                                       ocs_hcu_irq_handler, NULL, 0,
                                       "keembay-ocs-hcu", hcu_dev);
        if (rc < 0) {
                dev_err(dev, "Could not request IRQ.\n");
                return rc;
        }

        INIT_LIST_HEAD(&hcu_dev->list);

        spin_lock_bh(&ocs_hcu.lock);
        list_add_tail(&hcu_dev->list, &ocs_hcu.dev_list);
        spin_unlock_bh(&ocs_hcu.lock);

        /* Initialize crypto engine */
        hcu_dev->engine = crypto_engine_alloc_init(dev, 1);
        if (!hcu_dev->engine) {
                rc = -ENOMEM;
                goto list_del;
        }

        rc = crypto_engine_start(hcu_dev->engine);
        if (rc) {
                dev_err(dev, "Could not start engine.\n");
                goto cleanup;
        }

        /* Security infrastructure guarantees OCS clock is enabled. */

        rc = crypto_register_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs));
        if (rc) {
                dev_err(dev, "Could not register algorithms.\n");
                goto cleanup;
        }

        return 0;

cleanup:
        crypto_engine_exit(hcu_dev->engine);
list_del:
        spin_lock_bh(&ocs_hcu.lock);
        list_del(&hcu_dev->list);
        spin_unlock_bh(&ocs_hcu.lock);

        return rc;
}

/* The OCS driver is a platform device. */
static struct platform_driver kmb_ocs_hcu_driver = {
        .probe = kmb_ocs_hcu_probe,
        .remove = kmb_ocs_hcu_remove,
        .driver = {
                        .name = DRV_NAME,
                        .of_match_table = kmb_ocs_hcu_of_match,
                },
};

module_platform_driver(kmb_ocs_hcu_driver);

MODULE_LICENSE("GPL");