linux/crypto/crypto_engine.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handle async block request by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <crypto/engine.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
                                    struct crypto_async_request *req, int err)
{
        unsigned long flags;
        bool finalize_req = false;
        int ret;
        struct crypto_engine_ctx *enginectx;

        /*
         * If the hardware cannot enqueue more requests
         * and the retry mechanism is not supported,
         * make sure we are completing the current request.
         */
        if (!engine->retry_support) {
                spin_lock_irqsave(&engine->queue_lock, flags);
                if (engine->cur_req == req) {
                        finalize_req = true;
                        engine->cur_req = NULL;
                }
                spin_unlock_irqrestore(&engine->queue_lock, flags);
        }

        if (finalize_req || engine->retry_support) {
                enginectx = crypto_tfm_ctx(req->tfm);
                if (enginectx->op.prepare_request &&
                    enginectx->op.unprepare_request) {
                        ret = enginectx->op.unprepare_request(engine, req);
                        if (ret)
                                dev_err(engine->dev, "failed to unprepare request\n");
                }
        }
        req->complete(req, err);

        kthread_queue_work(engine->kworker, &engine->pump_requests);
}

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
                                 bool in_kthread)
{
        struct crypto_async_request *async_req, *backlog;
        unsigned long flags;
        bool was_busy = false;
        int ret;
        struct crypto_engine_ctx *enginectx;

        spin_lock_irqsave(&engine->queue_lock, flags);

        /* Make sure we are not already running a request */
        if (!engine->retry_support && engine->cur_req)
                goto out;

        /* If another context is idling then defer */
        if (engine->idling) {
                kthread_queue_work(engine->kworker, &engine->pump_requests);
                goto out;
        }

        /* Check if the engine queue is idle */
        if (!crypto_queue_len(&engine->queue) || !engine->running) {
                if (!engine->busy)
                        goto out;

                /* Only do teardown in the thread */
                if (!in_kthread) {
                        kthread_queue_work(engine->kworker,
                                           &engine->pump_requests);
                        goto out;
                }

                engine->busy = false;
                engine->idling = true;
                spin_unlock_irqrestore(&engine->queue_lock, flags);

                if (engine->unprepare_crypt_hardware &&
                    engine->unprepare_crypt_hardware(engine))
                        dev_err(engine->dev, "failed to unprepare crypt hardware\n");

                spin_lock_irqsave(&engine->queue_lock, flags);
                engine->idling = false;
                goto out;
        }

start_request:
        /* Get the first request from the engine queue to handle */
        backlog = crypto_get_backlog(&engine->queue);
        async_req = crypto_dequeue_request(&engine->queue);
        if (!async_req)
                goto out;

        /*
         * If hardware doesn't support the retry mechanism,
         * keep track of the request we are processing now.
         * We'll need it on completion (crypto_finalize_request).
         */
        if (!engine->retry_support)
                engine->cur_req = async_req;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        if (engine->busy)
                was_busy = true;
        else
                engine->busy = true;

        spin_unlock_irqrestore(&engine->queue_lock, flags);

        /* At this point we have successfully dequeued a request to process */
        if (!was_busy && engine->prepare_crypt_hardware) {
                ret = engine->prepare_crypt_hardware(engine);
                if (ret) {
                        dev_err(engine->dev, "failed to prepare crypt hardware\n");
                        goto req_err_2;
                }
        }

        enginectx = crypto_tfm_ctx(async_req->tfm);

        if (enginectx->op.prepare_request) {
                ret = enginectx->op.prepare_request(engine, async_req);
                if (ret) {
                        dev_err(engine->dev, "failed to prepare request: %d\n",
                                ret);
                        goto req_err_2;
                }
        }
        if (!enginectx->op.do_one_request) {
                dev_err(engine->dev, "failed to do request\n");
                ret = -EINVAL;
                goto req_err_1;
        }

        ret = enginectx->op.do_one_request(engine, async_req);

        /* Request unsuccessfully executed by hardware */
        if (ret < 0) {
                /*
                 * If the hardware queue is full (-ENOSPC), requeue the
                 * request regardless of the backlog flag.
                 * Otherwise, unprepare and complete the request.
                 */
                if (!engine->retry_support ||
                    (ret != -ENOSPC)) {
                        dev_err(engine->dev,
                                "Failed to do one request from queue: %d\n",
                                ret);
                        goto req_err_1;
                }
                /*
                 * If the retry mechanism is supported,
                 * unprepare the current request and
                 * enqueue it back into the crypto-engine queue.
                 */
                if (enginectx->op.unprepare_request) {
                        ret = enginectx->op.unprepare_request(engine,
                                                              async_req);
                        if (ret)
                                dev_err(engine->dev,
                                        "failed to unprepare request\n");
                }
                spin_lock_irqsave(&engine->queue_lock, flags);
                /*
                 * If the hardware was unable to execute the request, enqueue
                 * it back at the front of the crypto-engine queue, to keep
                 * the order of requests.
                 */
                crypto_enqueue_request_head(&engine->queue, async_req);

                kthread_queue_work(engine->kworker, &engine->pump_requests);
                goto out;
        }

        goto retry;

req_err_1:
        if (enginectx->op.unprepare_request) {
                ret = enginectx->op.unprepare_request(engine, async_req);
                if (ret)
                        dev_err(engine->dev, "failed to unprepare request\n");
        }

req_err_2:
        async_req->complete(async_req, ret);

retry:
        /* If the retry mechanism is supported, send new requests to the engine */
        if (engine->retry_support) {
                spin_lock_irqsave(&engine->queue_lock, flags);
                goto start_request;
        }
        return;

out:
        spin_unlock_irqrestore(&engine->queue_lock, flags);

        /*
         * Batching requests is possible only if
         * the hardware can enqueue multiple requests
         */
        if (engine->do_batch_requests) {
                ret = engine->do_batch_requests(engine);
                if (ret)
                        dev_err(engine->dev, "failed to do batch requests: %d\n",
                                ret);
        }

        return;
}

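/*
 * Illustrative sketch (not part of this file): a driver that offloads work
 * through the crypto engine typically embeds a struct crypto_engine_ctx at
 * the start of its transform context and fills in the op callbacks that
 * crypto_pump_requests() above invokes. The my_drv_* names below are
 * hypothetical; only do_one_request is mandatory, while prepare_request and
 * unprepare_request are optional.
 *
 *      struct my_drv_ctx {
 *              struct crypto_engine_ctx enginectx;     // must be first, see crypto_tfm_ctx() above
 *              // ... driver-private state ...
 *      };
 *
 *      static int my_drv_do_one_request(struct crypto_engine *engine, void *areq)
 *      {
 *              struct skcipher_request *req =
 *                      container_of(areq, struct skcipher_request, base);
 *
 *              // Program the hardware here. Returning 0 means the request
 *              // was accepted; the driver later calls
 *              // crypto_finalize_skcipher_request() from its completion
 *              // path. With retry_support, returning -ENOSPC requeues the
 *              // request at the head of the engine queue.
 *              return 0;
 *      }
 *
 *      static int my_drv_init_tfm(struct crypto_skcipher *tfm)
 *      {
 *              struct my_drv_ctx *ctx = crypto_skcipher_ctx(tfm);
 *
 *              ctx->enginectx.op.do_one_request = my_drv_do_one_request;
 *              ctx->enginectx.op.prepare_request = NULL;
 *              ctx->enginectx.op.unprepare_request = NULL;
 *              return 0;
 *      }
 */
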
static void crypto_pump_work(struct kthread_work *work)
{
        struct crypto_engine *engine =
                container_of(work, struct crypto_engine, pump_requests);

        crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * @need_pump: whether to queue the request pump when the engine is not busy
 */
static int crypto_transfer_request(struct crypto_engine *engine,
                                   struct crypto_async_request *req,
                                   bool need_pump)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&engine->queue_lock, flags);

        if (!engine->running) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                return -ESHUTDOWN;
        }

        ret = crypto_enqueue_request(&engine->queue, req);

        if (!engine->busy && need_pump)
                kthread_queue_work(engine->kworker, &engine->pump_requests);

        spin_unlock_irqrestore(&engine->queue_lock, flags);
        return ret;
}

/**
 * crypto_transfer_request_to_engine - transfer one request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
                                             struct crypto_async_request *req)
{
        return crypto_transfer_request(engine, req, true);
}

/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
                                           struct aead_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);

/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
                                               struct akcipher_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);

/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
                                           struct ahash_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
                                               struct skcipher_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);

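/*
 * Illustrative sketch (hypothetical driver code, not part of this file): the
 * transfer helpers above are what a driver's algorithm entry points call to
 * hand a request over to the engine queue instead of touching the hardware
 * directly. For an skcipher, the encrypt/decrypt callbacks reduce to:
 *
 *      static int my_drv_encrypt(struct skcipher_request *req)
 *      {
 *              struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 *              struct my_drv_ctx *ctx = crypto_skcipher_ctx(tfm);
 *
 *              // my_drv_ctx and ctx->engine are hypothetical; the driver is
 *              // assumed to have saved the engine pointer at probe time.
 *              return crypto_transfer_skcipher_request_to_engine(ctx->engine, req);
 *      }
 *
 * The return value is whatever crypto_transfer_request() reports: -ESHUTDOWN
 * if the engine is not running, otherwise the crypto_enqueue_request()
 * status (typically -EINPROGRESS).
 */
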
/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
                                  struct aead_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);

/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
                                      struct akcipher_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);

/**
 * crypto_finalize_hash_request - finalize one ahash_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
                                  struct ahash_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
                                      struct skcipher_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);

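/*
 * Illustrative sketch (hypothetical driver code, not part of this file): once
 * the hardware signals completion, the driver calls the matching finalize
 * helper from its interrupt handler, tasklet or completion work. This runs
 * the request's completion callback and kicks the pump so the next queued
 * request is processed.
 *
 *      static irqreturn_t my_drv_irq(int irq, void *data)
 *      {
 *              struct my_drv_device *dd = data;        // hypothetical device struct
 *              int err = my_drv_read_status(dd);       // hypothetical status read
 *
 *              crypto_finalize_skcipher_request(dd->engine, dd->req, err);
 *              return IRQ_HANDLED;
 *      }
 *
 * Drivers without retry support must finalize the same request that
 * crypto_pump_requests() handed to do_one_request(), since engine->cur_req
 * is used to match it in crypto_finalize_request().
 */
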
/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
        unsigned long flags;

        spin_lock_irqsave(&engine->queue_lock, flags);

        if (engine->running || engine->busy) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                return -EBUSY;
        }

        engine->running = true;
        spin_unlock_irqrestore(&engine->queue_lock, flags);

        kthread_queue_work(engine->kworker, &engine->pump_requests);

        return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
        unsigned long flags;
        unsigned int limit = 500;
        int ret = 0;

        spin_lock_irqsave(&engine->queue_lock, flags);

        /*
         * If the engine queue is not empty or the engine is busy, wait for a
         * while so that the queued requests can be pumped.
         */
        while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                msleep(20);
                spin_lock_irqsave(&engine->queue_lock, flags);
        }

        if (crypto_queue_len(&engine->queue) || engine->busy)
                ret = -EBUSY;
        else
                engine->running = false;

        spin_unlock_irqrestore(&engine->queue_lock, flags);

        if (ret)
                dev_warn(engine->dev, "could not stop engine\n");

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure
 * and initialize it by setting the maximum number of entries in the software
 * crypto-engine queue.
 * @dev: the device attached to the hardware engine
 * @retry_support: whether the hardware has support for the retry mechanism
 * @cbk_do_batch: pointer to a callback function to be invoked when executing
 *                a batch of requests.
 *                This has the form:
 *                callback(struct crypto_engine *engine)
 *                where:
 *                @engine: the crypto engine structure.
 * @rt: whether this queue is set to run as a realtime task
 * @qlen: maximum size of the crypto-engine queue
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
                                                       bool retry_support,
                                                       int (*cbk_do_batch)(struct crypto_engine *engine),
                                                       bool rt, int qlen)
{
        struct crypto_engine *engine;

        if (!dev)
                return NULL;

        engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
        if (!engine)
                return NULL;

        engine->dev = dev;
        engine->rt = rt;
        engine->running = false;
        engine->busy = false;
        engine->idling = false;
        engine->retry_support = retry_support;
        engine->priv_data = dev;
        /*
         * Batching requests is possible only if
         * the hardware has support for the retry mechanism.
         */
        engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;

        snprintf(engine->name, sizeof(engine->name),
                 "%s-engine", dev_name(dev));

        crypto_init_queue(&engine->queue, qlen);
        spin_lock_init(&engine->queue_lock);

        engine->kworker = kthread_create_worker(0, "%s", engine->name);
        if (IS_ERR(engine->kworker)) {
                dev_err(dev, "failed to create crypto request pump task\n");
                return NULL;
        }
        kthread_init_work(&engine->pump_requests, crypto_pump_work);

        if (engine->rt) {
                dev_info(dev, "will run requests pump with realtime priority\n");
                sched_set_fifo(engine->kworker->task);
        }

        return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to the hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
        return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
                                                CRYPTO_ENGINE_MAX_QLEN);
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);

/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
        int ret;

        ret = crypto_engine_stop(engine);
        if (ret)
                return ret;

        kthread_destroy_worker(engine->kworker);

        return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);

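/*
 * Illustrative sketch (hypothetical driver code, not part of this file): a
 * typical probe() allocates and starts the engine, and the matching remove()
 * tears it down with the helpers defined above. Error handling and the
 * my_drv_* names are assumptions.
 *
 *      static int my_drv_probe(struct platform_device *pdev)
 *      {
 *              struct my_drv_device *dd;       // hypothetical device struct
 *              // ... allocate dd, map registers, request IRQs ...
 *
 *              dd->engine = crypto_engine_alloc_init(&pdev->dev, true);
 *              if (!dd->engine)
 *                      return -ENOMEM;
 *
 *              return crypto_engine_start(dd->engine);
 *      }
 *
 *      static int my_drv_remove(struct platform_device *pdev)
 *      {
 *              struct my_drv_device *dd = platform_get_drvdata(pdev);
 *
 *              // Stops the engine and destroys its kworker.
 *              crypto_engine_exit(dd->engine);
 *              return 0;
 *      }
 */
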
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");