linux/crypto/crypto_engine.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handle async block requests by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <crypto/engine.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
                                    struct crypto_async_request *req, int err)
{
        unsigned long flags;
        bool finalize_req = false;
        int ret;
        struct crypto_engine_ctx *enginectx;

        /*
         * If the hardware cannot enqueue more requests and the retry
         * mechanism is not supported, make sure we are completing the
         * current request.
         */
        if (!engine->retry_support) {
                spin_lock_irqsave(&engine->queue_lock, flags);
                if (engine->cur_req == req) {
                        finalize_req = true;
                        engine->cur_req = NULL;
                }
                spin_unlock_irqrestore(&engine->queue_lock, flags);
        }

        if (finalize_req || engine->retry_support) {
                enginectx = crypto_tfm_ctx(req->tfm);
                if (enginectx->op.prepare_request &&
                    enginectx->op.unprepare_request) {
                        ret = enginectx->op.unprepare_request(engine, req);
                        if (ret)
                                dev_err(engine->dev, "failed to unprepare request\n");
                }
        }
        req->complete(req, err);

        kthread_queue_work(engine->kworker, &engine->pump_requests);
}

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
                                 bool in_kthread)
{
        struct crypto_async_request *async_req, *backlog;
        unsigned long flags;
        bool was_busy = false;
        int ret;
        struct crypto_engine_ctx *enginectx;

        spin_lock_irqsave(&engine->queue_lock, flags);

        /* Make sure we are not already running a request */
        if (!engine->retry_support && engine->cur_req)
                goto out;

        /* If another context is idling then defer */
        if (engine->idling) {
                kthread_queue_work(engine->kworker, &engine->pump_requests);
                goto out;
        }

        /* Check if the engine queue is idle */
        if (!crypto_queue_len(&engine->queue) || !engine->running) {
                if (!engine->busy)
                        goto out;

                /* Only do teardown in the thread */
                if (!in_kthread) {
                        kthread_queue_work(engine->kworker,
                                           &engine->pump_requests);
                        goto out;
                }

                engine->busy = false;
                engine->idling = true;
                spin_unlock_irqrestore(&engine->queue_lock, flags);

                if (engine->unprepare_crypt_hardware &&
                    engine->unprepare_crypt_hardware(engine))
                        dev_err(engine->dev, "failed to unprepare crypt hardware\n");

                spin_lock_irqsave(&engine->queue_lock, flags);
                engine->idling = false;
                goto out;
        }

start_request:
        /* Get the first request from the engine queue to handle */
        backlog = crypto_get_backlog(&engine->queue);
        async_req = crypto_dequeue_request(&engine->queue);
        if (!async_req)
                goto out;

        /*
         * If hardware doesn't support the retry mechanism,
         * keep track of the request we are processing now.
         * We'll need it on completion (crypto_finalize_request).
         */
        if (!engine->retry_support)
                engine->cur_req = async_req;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        if (engine->busy)
                was_busy = true;
        else
                engine->busy = true;

        spin_unlock_irqrestore(&engine->queue_lock, flags);

        /* At this point we have successfully dequeued a request to process */
        if (!was_busy && engine->prepare_crypt_hardware) {
                ret = engine->prepare_crypt_hardware(engine);
                if (ret) {
                        dev_err(engine->dev, "failed to prepare crypt hardware\n");
                        goto req_err_2;
                }
        }

        enginectx = crypto_tfm_ctx(async_req->tfm);

        if (enginectx->op.prepare_request) {
                ret = enginectx->op.prepare_request(engine, async_req);
                if (ret) {
                        dev_err(engine->dev, "failed to prepare request: %d\n",
                                ret);
                        goto req_err_2;
                }
        }
        if (!enginectx->op.do_one_request) {
                dev_err(engine->dev, "failed to do request\n");
                ret = -EINVAL;
                goto req_err_1;
        }

        ret = enginectx->op.do_one_request(engine, async_req);

        /* The request was not executed successfully by the hardware */
        if (ret < 0) {
                /*
                 * If the hardware queue is full (-ENOSPC), requeue the
                 * request regardless of the backlog flag.
                 * Otherwise, unprepare and complete the request.
                 */
                if (!engine->retry_support ||
                    (ret != -ENOSPC)) {
                        dev_err(engine->dev,
                                "Failed to do one request from queue: %d\n",
                                ret);
                        goto req_err_1;
                }
                /*
                 * If the retry mechanism is supported, unprepare the
                 * current request and enqueue it back into the
                 * crypto-engine queue.
                 */
                if (enginectx->op.unprepare_request) {
                        ret = enginectx->op.unprepare_request(engine,
                                                              async_req);
                        if (ret)
                                dev_err(engine->dev,
                                        "failed to unprepare request\n");
                }
                spin_lock_irqsave(&engine->queue_lock, flags);
                /*
                 * If the hardware was unable to execute the request,
                 * enqueue it at the front of the crypto-engine queue to
                 * preserve the order of requests.
                 */
                crypto_enqueue_request_head(&engine->queue, async_req);

                kthread_queue_work(engine->kworker, &engine->pump_requests);
                goto out;
        }

        goto retry;

req_err_1:
        if (enginectx->op.unprepare_request) {
                ret = enginectx->op.unprepare_request(engine, async_req);
                if (ret)
                        dev_err(engine->dev, "failed to unprepare request\n");
        }

req_err_2:
        async_req->complete(async_req, ret);

retry:
        /* If retry mechanism is supported, send new requests to engine */
        if (engine->retry_support) {
                spin_lock_irqsave(&engine->queue_lock, flags);
                goto start_request;
        }
        return;

out:
        spin_unlock_irqrestore(&engine->queue_lock, flags);

        /*
         * Batching requests is possible only if the hardware can
         * enqueue multiple requests.
         */
        if (engine->do_batch_requests) {
                ret = engine->do_batch_requests(engine);
                if (ret)
                        dev_err(engine->dev, "failed to do batch requests: %d\n",
                                ret);
        }

        return;
}
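
/*
 * Illustrative sketch (not part of this file): a driver typically embeds a
 * struct crypto_engine_ctx at the start of its tfm context and fills in the
 * op callbacks that crypto_pump_requests() invokes above. All "mydrv" names
 * below are hypothetical.
 *
 *	struct mydrv_ctx {
 *		struct crypto_engine_ctx enginectx;	// must be first member
 *		// ... driver-specific state ...
 *	};
 *
 *	static int mydrv_skcipher_init_tfm(struct crypto_skcipher *tfm)
 *	{
 *		struct mydrv_ctx *ctx = crypto_skcipher_ctx(tfm);
 *
 *		ctx->enginectx.op.prepare_request = mydrv_prepare_req;
 *		ctx->enginectx.op.unprepare_request = mydrv_unprepare_req;
 *		ctx->enginectx.op.do_one_request = mydrv_do_one_req;
 *		return 0;
 *	}
 *
 * When the engine was allocated with retry support, mydrv_do_one_req() may
 * return -ENOSPC if the hardware queue is full; the request is then put back
 * at the head of the engine queue and retried later.
 */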

static void crypto_pump_work(struct kthread_work *work)
{
        struct crypto_engine *engine =
                container_of(work, struct crypto_engine, pump_requests);

        crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * @need_pump: whether the request pump should be scheduled after enqueueing
 */
static int crypto_transfer_request(struct crypto_engine *engine,
                                   struct crypto_async_request *req,
                                   bool need_pump)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&engine->queue_lock, flags);

        if (!engine->running) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                return -ESHUTDOWN;
        }

        ret = crypto_enqueue_request(&engine->queue, req);

        if (!engine->busy && need_pump)
                kthread_queue_work(engine->kworker, &engine->pump_requests);

        spin_unlock_irqrestore(&engine->queue_lock, flags);
        return ret;
}

/**
 * crypto_transfer_request_to_engine - transfer one request to the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
                                             struct crypto_async_request *req)
{
        return crypto_transfer_request(engine, req, true);
}

/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * to the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
                                           struct aead_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);

/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * to the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
                                               struct akcipher_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);

/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * to the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
                                           struct ahash_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_transfer_kpp_request_to_engine - transfer one kpp_request to the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_kpp_request_to_engine(struct crypto_engine *engine,
                                          struct kpp_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_kpp_request_to_engine);

/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * to the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
                                               struct skcipher_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);

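/*
 * Illustrative sketch (not part of this file): an skcipher implementation
 * backed by the engine usually just hands the request over from its
 * encrypt/decrypt entry points. The "mydrv" names are hypothetical.
 *
 *	static int mydrv_skcipher_encrypt(struct skcipher_request *req)
 *	{
 *		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 *		struct mydrv_dev *mydev = mydrv_get_dev(tfm);
 *
 *		return crypto_transfer_skcipher_request_to_engine(mydev->engine,
 *								  req);
 *	}
 *
 * The call typically returns -EINPROGRESS; the actual processing happens
 * later, when the pump invokes the driver's do_one_request() callback.
 */
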
/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
                                  struct aead_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);

/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
                                      struct akcipher_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);

/**
 * crypto_finalize_hash_request - finalize one ahash_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
                                  struct ahash_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_finalize_kpp_request - finalize one kpp_request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_kpp_request(struct crypto_engine *engine,
                                 struct kpp_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_kpp_request);

/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
                                      struct skcipher_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);

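/*
 * Illustrative sketch (not part of this file): once the hardware signals
 * completion (typically from the driver's interrupt handler or a completion
 * work item), the driver reports the result back to the engine. The "mydrv"
 * names and fields are hypothetical.
 *
 *	static void mydrv_done_task(unsigned long data)
 *	{
 *		struct mydrv_dev *mydev = (struct mydrv_dev *)data;
 *		struct skcipher_request *req = mydev->cur_req;
 *		int err = mydrv_read_status(mydev) ? -EIO : 0;
 *
 *		crypto_finalize_skcipher_request(mydev->engine, req, err);
 *	}
 *
 * crypto_finalize_request() completes the request and kicks the pump so the
 * next queued request can be processed.
 */
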
/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else an error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
        unsigned long flags;

        spin_lock_irqsave(&engine->queue_lock, flags);

        if (engine->running || engine->busy) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                return -EBUSY;
        }

        engine->running = true;
        spin_unlock_irqrestore(&engine->queue_lock, flags);

        kthread_queue_work(engine->kworker, &engine->pump_requests);

        return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else an error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
        unsigned long flags;
        unsigned int limit = 500;
        int ret = 0;

        spin_lock_irqsave(&engine->queue_lock, flags);

        /*
         * If the engine queue is not empty or the engine is busy, wait
         * for a while so the queued requests can be pumped.
         */
        while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                msleep(20);
                spin_lock_irqsave(&engine->queue_lock, flags);
        }

        if (crypto_queue_len(&engine->queue) || engine->busy)
                ret = -EBUSY;
        else
                engine->running = false;

        spin_unlock_irqrestore(&engine->queue_lock, flags);

        if (ret)
                dev_warn(engine->dev, "could not stop engine\n");

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure
 * and initialize it by setting the maximum number of entries in the software
 * crypto-engine queue.
 * @dev: the device to which the hardware engine is attached
 * @retry_support: whether the hardware has support for the retry mechanism
 * @cbk_do_batch: pointer to a callback function to be invoked when executing
 *                a batch of requests.
 *                This has the form:
 *                callback(struct crypto_engine *engine)
 *                where:
 *                @engine: the crypto engine structure.
 * @rt: whether this queue is set to run as a realtime task
 * @qlen: maximum size of the crypto-engine queue
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
                                                       bool retry_support,
                                                       int (*cbk_do_batch)(struct crypto_engine *engine),
                                                       bool rt, int qlen)
{
        struct crypto_engine *engine;

        if (!dev)
                return NULL;

        engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
        if (!engine)
                return NULL;

        engine->dev = dev;
        engine->rt = rt;
        engine->running = false;
        engine->busy = false;
        engine->idling = false;
        engine->retry_support = retry_support;
        engine->priv_data = dev;
        /*
         * Batching requests is possible only if the hardware supports
         * the retry mechanism.
         */
        engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;

        snprintf(engine->name, sizeof(engine->name),
                 "%s-engine", dev_name(dev));

        crypto_init_queue(&engine->queue, qlen);
        spin_lock_init(&engine->queue_lock);

        engine->kworker = kthread_create_worker(0, "%s", engine->name);
        if (IS_ERR(engine->kworker)) {
                dev_err(dev, "failed to create crypto request pump task\n");
                return NULL;
        }
        kthread_init_work(&engine->pump_requests, crypto_pump_work);

        if (engine->rt) {
                dev_info(dev, "will run requests pump with realtime priority\n");
                sched_set_fifo(engine->kworker->task);
        }

        return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device to which the hardware engine is attached
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
        return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
                                                CRYPTO_ENGINE_MAX_QLEN);
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);

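/*
 * Illustrative sketch (not part of this file): a typical probe() path
 * allocates the engine, starts it, and only then registers its algorithms.
 * All "mydrv" names are hypothetical and error unwinding is elided.
 *
 *	static int mydrv_probe(struct platform_device *pdev)
 *	{
 *		struct mydrv_dev *mydev;
 *		int ret;
 *
 *		// ... allocate mydev, map registers, request the IRQ ...
 *
 *		mydev->engine = crypto_engine_alloc_init(&pdev->dev, true);
 *		if (!mydev->engine)
 *			return -ENOMEM;
 *
 *		ret = crypto_engine_start(mydev->engine);
 *		if (ret)
 *			return ret;
 *
 *		return crypto_register_skcipher(&mydrv_alg);
 *	}
 *
 * A driver whose hardware can queue several requests would instead call
 * crypto_engine_alloc_init_and_set() with retry_support and, optionally,
 * a batch callback.
 */
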
/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return 0 on success, else an error code.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
        int ret;

        ret = crypto_engine_stop(engine);
        if (ret)
                return ret;

        kthread_destroy_worker(engine->kworker);

        return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);

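/*
 * Illustrative sketch (not part of this file): the matching remove() path
 * unregisters the algorithms first so no new requests arrive, then tears
 * the engine down. The "mydrv" names are hypothetical.
 *
 *	static int mydrv_remove(struct platform_device *pdev)
 *	{
 *		struct mydrv_dev *mydev = platform_get_drvdata(pdev);
 *
 *		crypto_unregister_skcipher(&mydrv_alg);
 *		return crypto_engine_exit(mydev->engine);
 *	}
 */
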
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");