linux/crypto/cryptd.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/cryptd.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static unsigned int cryptd_max_cpu_qlen = 1000;
module_param(cryptd_max_cpu_qlen, uint, 0);
MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd max per-CPU queue depth");

static struct workqueue_struct *cryptd_wq;

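/*
 * Requests are queued per CPU: each possible CPU gets its own
 * crypto_queue plus a work_struct that drains that queue on the
 * cryptd workqueue.
 */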
struct cryptd_cpu_queue {
        struct crypto_queue queue;
        struct work_struct work;
};

struct cryptd_queue {
        struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
        struct crypto_spawn spawn;
        struct cryptd_queue *queue;
};

struct skcipherd_instance_ctx {
        struct crypto_skcipher_spawn spawn;
        struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
        struct crypto_shash_spawn spawn;
        struct cryptd_queue *queue;
};

struct aead_instance_ctx {
        struct crypto_aead_spawn aead_spawn;
        struct cryptd_queue *queue;
};

struct cryptd_skcipher_ctx {
        atomic_t refcnt;
        struct crypto_sync_skcipher *child;
};

struct cryptd_skcipher_request_ctx {
        crypto_completion_t complete;
};

struct cryptd_hash_ctx {
        atomic_t refcnt;
        struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
        crypto_completion_t complete;
        struct shash_desc desc;
};

struct cryptd_aead_ctx {
        atomic_t refcnt;
        struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
        crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
                             unsigned int max_cpu_qlen)
{
        int cpu;
        struct cryptd_cpu_queue *cpu_queue;

        queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
        if (!queue->cpu_queue)
                return -ENOMEM;
        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
                INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
        }
        pr_info("cryptd: max_cpu_qlen set to %u\n", max_cpu_qlen);
        return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
        int cpu;
        struct cryptd_cpu_queue *cpu_queue;

        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                BUG_ON(cpu_queue->queue.qlen);
        }
        free_percpu(queue->cpu_queue);
}

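/*
 * Queue a request on the current CPU's queue and kick the worker for
 * that CPU.  If the caller holds a reference on the tfm (refcnt is
 * already non-zero, see cryptd_alloc_*), take an additional reference
 * per queued request so the tfm stays alive until the request
 * completes.
 */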
static int cryptd_enqueue_request(struct cryptd_queue *queue,
                                  struct crypto_async_request *request)
{
        int cpu, err;
        struct cryptd_cpu_queue *cpu_queue;
        atomic_t *refcnt;

        cpu = get_cpu();
        cpu_queue = this_cpu_ptr(queue->cpu_queue);
        err = crypto_enqueue_request(&cpu_queue->queue, request);

        refcnt = crypto_tfm_ctx(request->tfm);

        if (err == -ENOSPC)
                goto out_put_cpu;

        queue_work_on(cpu, cryptd_wq, &cpu_queue->work);

        if (!atomic_read(refcnt))
                goto out_put_cpu;

        atomic_inc(refcnt);

out_put_cpu:
        put_cpu();

        return err;
}

/*
 * Called in workqueue context: do one unit of real crypto work (via
 * req->complete) and reschedule itself if there is more work to do.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
        struct cryptd_cpu_queue *cpu_queue;
        struct crypto_async_request *req, *backlog;

        cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
        /*
         * Only handle one request at a time to avoid hogging the workqueue.
         * preempt_disable/enable prevents this worker from being preempted
         * by cryptd_enqueue_request(); local_bh_disable/enable prevents
         * cryptd_enqueue_request() from running in softirq context here.
         */
        local_bh_disable();
        preempt_disable();
        backlog = crypto_get_backlog(&cpu_queue->queue);
        req = crypto_dequeue_request(&cpu_queue->queue);
        preempt_enable();
        local_bh_enable();

        if (!req)
                return;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);
        req->complete(req, 0);

        if (cpu_queue->queue.qlen)
                queue_work(cryptd_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
        return ictx->queue;
}

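/*
 * Carry the CRYPTO_ALG_INTERNAL bit over from the template parameters
 * so that cryptd can wrap implementations that must not be used
 * directly (e.g. the bare SIMD variants of accelerated ciphers).
 */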
static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
                                         u32 *mask)
{
        struct crypto_attr_type *algt;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return;

        *type |= algt->type & CRYPTO_ALG_INTERNAL;
        *mask |= algt->mask & CRYPTO_ALG_INTERNAL;
}

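/*
 * Common instance setup: name the instance "cryptd(<driver>)" and
 * advertise it 50 priority points above the wrapped algorithm so that
 * lookups prefer the async wrapper.
 */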
static int cryptd_init_instance(struct crypto_instance *inst,
                                struct crypto_alg *alg)
{
        if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)",
                     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
                return -ENAMETOOLONG;

        memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

        inst->alg.cra_priority = alg->cra_priority + 50;
        inst->alg.cra_blocksize = alg->cra_blocksize;
        inst->alg.cra_alignmask = alg->cra_alignmask;

        return 0;
}

static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
                                   unsigned int tail)
{
        char *p;
        struct crypto_instance *inst;
        int err;

        p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        inst = (void *)(p + head);

        err = cryptd_init_instance(inst, alg);
        if (err)
                goto out_free_inst;

out:
        return p;

out_free_inst:
        kfree(p);
        p = ERR_PTR(err);
        goto out;
}

static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
                                  const u8 *key, unsigned int keylen)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
        struct crypto_sync_skcipher *child = ctx->child;
        int err;

        crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_sync_skcipher_set_flags(child,
                                       crypto_skcipher_get_flags(parent) &
                                         CRYPTO_TFM_REQ_MASK);
        err = crypto_sync_skcipher_setkey(child, key, keylen);
        crypto_skcipher_set_flags(parent,
                                  crypto_sync_skcipher_get_flags(child) &
                                          CRYPTO_TFM_RES_MASK);
        return err;
}

static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
        int refcnt = atomic_read(&ctx->refcnt);

        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();

        if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
                crypto_free_skcipher(tfm);
}

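/*
 * Workqueue-side encrypt/decrypt: re-issue the request synchronously
 * on the child skcipher via an on-stack subrequest, then run the
 * saved completion.  An err of -EINPROGRESS is the backlog
 * notification from the worker and is passed straight through.
 */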
static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
                                    int err)
{
        struct skcipher_request *req = skcipher_request_cast(base);
        struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_sync_skcipher *child = ctx->child;
        SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        skcipher_request_set_sync_tfm(subreq, child);
        skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
                                      NULL, NULL);
        skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
                                   req->iv);

        err = crypto_skcipher_encrypt(subreq);
        skcipher_request_zero(subreq);

        req->base.complete = rctx->complete;

out:
        cryptd_skcipher_complete(req, err);
}

static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
                                    int err)
{
        struct skcipher_request *req = skcipher_request_cast(base);
        struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_sync_skcipher *child = ctx->child;
        SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        skcipher_request_set_sync_tfm(subreq, child);
        skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
                                      NULL, NULL);
        skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
                                   req->iv);

        err = crypto_skcipher_decrypt(subreq);
        skcipher_request_zero(subreq);

        req->base.complete = rctx->complete;

out:
        cryptd_skcipher_complete(req, err);
}

static int cryptd_skcipher_enqueue(struct skcipher_request *req,
                                   crypto_completion_t compl)
{
        struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cryptd_queue *queue;

        queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
        rctx->complete = req->base.complete;
        req->base.complete = compl;

        return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
{
        return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
}

static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
{
        return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
}

static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
        struct skcipher_instance *inst = skcipher_alg_instance(tfm);
        struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
        struct crypto_skcipher_spawn *spawn = &ictx->spawn;
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_skcipher *cipher;

        cipher = crypto_spawn_skcipher(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = (struct crypto_sync_skcipher *)cipher;
        crypto_skcipher_set_reqsize(
                tfm, sizeof(struct cryptd_skcipher_request_ctx));
        return 0;
}

static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

        crypto_free_sync_skcipher(ctx->child);
}

static void cryptd_skcipher_free(struct skcipher_instance *inst)
{
        struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);

        crypto_drop_skcipher(&ctx->spawn);
        kfree(inst);
}

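/*
 * Instantiate "cryptd(<alg>)" for an skcipher, e.g. a request for
 * "cryptd(cbc(aes))" wraps the "cbc(aes)" implementation that matches
 * the type/mask constraints.
 */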
static int cryptd_create_skcipher(struct crypto_template *tmpl,
                                  struct rtattr **tb,
                                  struct cryptd_queue *queue)
{
        struct skcipherd_instance_ctx *ctx;
        struct skcipher_instance *inst;
        struct skcipher_alg *alg;
        const char *name;
        u32 type;
        u32 mask;
        int err;

        type = 0;
        mask = CRYPTO_ALG_ASYNC;

        cryptd_check_internal(tb, &type, &mask);

        name = crypto_attr_alg_name(tb[1]);
        if (IS_ERR(name))
                return PTR_ERR(name);

        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;

        ctx = skcipher_instance_ctx(inst);
        ctx->queue = queue;

        crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
        err = crypto_grab_skcipher(&ctx->spawn, name, type, mask);
        if (err)
                goto out_free_inst;

        alg = crypto_spawn_skcipher_alg(&ctx->spawn);
        err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
        if (err)
                goto out_drop_skcipher;

        inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
                                   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);

        inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
        inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
        inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
        inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);

        inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);

        inst->alg.init = cryptd_skcipher_init_tfm;
        inst->alg.exit = cryptd_skcipher_exit_tfm;

        inst->alg.setkey = cryptd_skcipher_setkey;
        inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
        inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;

        inst->free = cryptd_skcipher_free;

        err = skcipher_register_instance(tmpl, inst);
        if (err) {
out_drop_skcipher:
                crypto_drop_skcipher(&ctx->spawn);
out_free_inst:
                kfree(inst);
        }
        return err;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
        struct crypto_shash_spawn *spawn = &ictx->spawn;
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_shash *hash;

        hash = crypto_spawn_shash(spawn);
        if (IS_ERR(hash))
                return PTR_ERR(hash);

        ctx->child = hash;
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct cryptd_hash_request_ctx) +
                                 crypto_shash_descsize(hash));
        return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
                                   const u8 *key, unsigned int keylen)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
        struct crypto_shash *child = ctx->child;
        int err;

        crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
                                      CRYPTO_TFM_REQ_MASK);
        err = crypto_shash_setkey(child, key, keylen);
        crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
                                       CRYPTO_TFM_RES_MASK);
        return err;
}

static int cryptd_hash_enqueue(struct ahash_request *req,
                                crypto_completion_t compl)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct cryptd_queue *queue =
                cryptd_get_queue(crypto_ahash_tfm(tfm));

        rctx->complete = req->base.complete;
        req->base.complete = compl;

        return cryptd_enqueue_request(queue, &req->base);
}

static void cryptd_hash_complete(struct ahash_request *req, int err)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        int refcnt = atomic_read(&ctx->refcnt);

        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();

        if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
                crypto_free_ahash(tfm);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
        struct crypto_shash *child = ctx->child;
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct shash_desc *desc = &rctx->desc;

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc->tfm = child;

        err = crypto_shash_init(desc);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx;

        rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        err = shash_ahash_update(req, &rctx->desc);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        err = crypto_shash_final(&rctx->desc, req->result);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        err = shash_ahash_finup(req, &rctx->desc);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
        struct crypto_shash *child = ctx->child;
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct shash_desc *desc = &rctx->desc;

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc->tfm = child;

        err = shash_ahash_digest(req, desc);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
        struct shash_desc *desc = cryptd_shash_desc(req);

        desc->tfm = ctx->child;

        return crypto_shash_import(desc, in);
}

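/*
 * Instantiate "cryptd(<alg>)" for a hash.  The wrapper is an ahash
 * built on a synchronous shash; the shash descriptor lives in the
 * request context, so export/import need no extra allocation.
 */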
static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
                              struct cryptd_queue *queue)
{
        struct hashd_instance_ctx *ctx;
        struct ahash_instance *inst;
        struct shash_alg *salg;
        struct crypto_alg *alg;
        u32 type = 0;
        u32 mask = 0;
        int err;

        cryptd_check_internal(tb, &type, &mask);

        salg = shash_attr_alg(tb[1], type, mask);
        if (IS_ERR(salg))
                return PTR_ERR(salg);

        alg = &salg->base;
        inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
                                     sizeof(*ctx));
        err = PTR_ERR(inst);
        if (IS_ERR(inst))
                goto out_put_alg;

        ctx = ahash_instance_ctx(inst);
        ctx->queue = queue;

        err = crypto_init_shash_spawn(&ctx->spawn, salg,
                                      ahash_crypto_instance(inst));
        if (err)
                goto out_free_inst;

        inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
                (alg->cra_flags & (CRYPTO_ALG_INTERNAL |
                                   CRYPTO_ALG_OPTIONAL_KEY));

        inst->alg.halg.digestsize = salg->digestsize;
        inst->alg.halg.statesize = salg->statesize;
        inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

        inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
        inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

        inst->alg.init   = cryptd_hash_init_enqueue;
        inst->alg.update = cryptd_hash_update_enqueue;
        inst->alg.final  = cryptd_hash_final_enqueue;
        inst->alg.finup  = cryptd_hash_finup_enqueue;
        inst->alg.export = cryptd_hash_export;
        inst->alg.import = cryptd_hash_import;
        if (crypto_shash_alg_has_setkey(salg))
                inst->alg.setkey = cryptd_hash_setkey;
        inst->alg.digest = cryptd_hash_digest_enqueue;

        err = ahash_register_instance(tmpl, inst);
        if (err) {
                crypto_drop_shash(&ctx->spawn);
out_free_inst:
                kfree(inst);
        }

out_put_alg:
        crypto_mod_put(alg);
        return err;
}

static int cryptd_aead_setkey(struct crypto_aead *parent,
                              const u8 *key, unsigned int keylen)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
        struct crypto_aead *child = ctx->child;

        return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
                                   unsigned int authsize)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
        struct crypto_aead *child = ctx->child;

        return crypto_aead_setauthsize(child, authsize);
}

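/*
 * Common workqueue-side path for AEAD encrypt and decrypt: re-target
 * the request at the child tfm, run the child's crypt function
 * synchronously, then invoke the saved completion with bottom halves
 * disabled, dropping the request's tfm reference on final completion.
 */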
static void cryptd_aead_crypt(struct aead_request *req,
                        struct crypto_aead *child,
                        int err,
                        int (*crypt)(struct aead_request *req))
{
        struct cryptd_aead_request_ctx *rctx;
        struct cryptd_aead_ctx *ctx;
        crypto_completion_t compl;
        struct crypto_aead *tfm;
        int refcnt;

        rctx = aead_request_ctx(req);
        compl = rctx->complete;

        tfm = crypto_aead_reqtfm(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;
        aead_request_set_tfm(req, child);
        err = crypt(req);

out:
        ctx = crypto_aead_ctx(tfm);
        refcnt = atomic_read(&ctx->refcnt);

        local_bh_disable();
        compl(&req->base, err);
        local_bh_enable();

        if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
                crypto_free_aead(tfm);
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
        struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
        struct crypto_aead *child = ctx->child;
        struct aead_request *req;

        req = container_of(areq, struct aead_request, base);
        cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
        struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
        struct crypto_aead *child = ctx->child;
        struct aead_request *req;

        req = container_of(areq, struct aead_request, base);
        cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
                                    crypto_completion_t compl)
{
        struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

        rctx->complete = req->base.complete;
        req->base.complete = compl;
        return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
        return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
        return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
        struct aead_instance *inst = aead_alg_instance(tfm);
        struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
        struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct crypto_aead *cipher;

        cipher = crypto_spawn_aead(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = cipher;
        crypto_aead_set_reqsize(
                tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
                         crypto_aead_reqsize(cipher)));
        return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
        crypto_free_aead(ctx->child);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
                              struct rtattr **tb,
                              struct cryptd_queue *queue)
{
        struct aead_instance_ctx *ctx;
        struct aead_instance *inst;
        struct aead_alg *alg;
        const char *name;
        u32 type = 0;
        u32 mask = CRYPTO_ALG_ASYNC;
        int err;

        cryptd_check_internal(tb, &type, &mask);

        name = crypto_attr_alg_name(tb[1]);
        if (IS_ERR(name))
                return PTR_ERR(name);

        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;

        ctx = aead_instance_ctx(inst);
        ctx->queue = queue;

        crypto_set_aead_spawn(&ctx->aead_spawn, aead_crypto_instance(inst));
        err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask);
        if (err)
                goto out_free_inst;

        alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
        err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
        if (err)
                goto out_drop_aead;

        inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
                                   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
        inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

        inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
        inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

        inst->alg.init = cryptd_aead_init_tfm;
        inst->alg.exit = cryptd_aead_exit_tfm;
        inst->alg.setkey = cryptd_aead_setkey;
        inst->alg.setauthsize = cryptd_aead_setauthsize;
        inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
        inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

        err = aead_register_instance(tmpl, inst);
        if (err) {
out_drop_aead:
                crypto_drop_aead(&ctx->aead_spawn);
out_free_inst:
                kfree(inst);
        }
        return err;
}

static struct cryptd_queue queue;

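/*
 * Template entry point: dispatch on the algorithm type encoded in the
 * instance parameters.  Note that skcipher instances still arrive
 * with the legacy CRYPTO_ALG_TYPE_BLKCIPHER type bits.
 */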
static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
        struct crypto_attr_type *algt;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return PTR_ERR(algt);

        switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_BLKCIPHER:
                return cryptd_create_skcipher(tmpl, tb, &queue);
        case CRYPTO_ALG_TYPE_HASH:
                return cryptd_create_hash(tmpl, tb, &queue);
        case CRYPTO_ALG_TYPE_AEAD:
                return cryptd_create_aead(tmpl, tb, &queue);
        }

        return -EINVAL;
}

static void cryptd_free(struct crypto_instance *inst)
{
        struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
        struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
        struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);

        switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_AHASH:
                crypto_drop_shash(&hctx->spawn);
                kfree(ahash_instance(inst));
                return;
        case CRYPTO_ALG_TYPE_AEAD:
                crypto_drop_aead(&aead_ctx->aead_spawn);
                kfree(aead_instance(inst));
                return;
        default:
                crypto_drop_spawn(&ctx->spawn);
                kfree(inst);
        }
}

static struct crypto_template cryptd_tmpl = {
        .name = "cryptd",
        .create = cryptd_create,
        .free = cryptd_free,
        .module = THIS_MODULE,
};

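/*
 * Allocate a cryptd skcipher handle for in-kernel users.  A minimal
 * usage sketch (hypothetical driver name, error handling elided):
 *
 *      struct cryptd_skcipher *ctfm;
 *
 *      ctfm = cryptd_alloc_skcipher("__cbc-aes-example",
 *                                   CRYPTO_ALG_INTERNAL,
 *                                   CRYPTO_ALG_INTERNAL);
 *      ...
 *      cryptd_free_skcipher(ctfm);
 *
 * The lookup must resolve to an instance owned by this module;
 * anything else is rejected with -EINVAL.
 */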
struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
                                              u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct cryptd_skcipher_ctx *ctx;
        struct crypto_skcipher *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);

        tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);

        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_skcipher(tfm);
                return ERR_PTR(-EINVAL);
        }

        ctx = crypto_skcipher_ctx(tfm);
        atomic_set(&ctx->refcnt, 1);

        return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);

struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

        return &ctx->child->base;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);

bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

        return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);

void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

        if (atomic_dec_and_test(&ctx->refcnt))
                crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);

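/*
 * Allocate a cryptd ahash handle.  The refcnt starts at 1 for the
 * caller; cryptd_ahash_queued() below reports whether additional
 * references, i.e. still-queued requests, are outstanding.
 */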
struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
                                        u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct cryptd_hash_ctx *ctx;
        struct crypto_ahash *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_ahash(tfm);
                return ERR_PTR(-EINVAL);
        }

        ctx = crypto_ahash_ctx(tfm);
        atomic_set(&ctx->refcnt, 1);

        return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

        return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

        if (atomic_dec_and_test(&ctx->refcnt))
                crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

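/*
 * Allocate a cryptd AEAD handle.  As with the skcipher and ahash
 * variants, the handle must resolve to an instance owned by this
 * module, and the embedded refcnt ties the tfm's lifetime to any
 * requests still sitting in the queues.
 */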
struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
                                                  u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct cryptd_aead_ctx *ctx;
        struct crypto_aead *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_aead(tfm);
                return ERR_PTR(-EINVAL);
        }

        ctx = crypto_aead_ctx(tfm);
        atomic_set(&ctx->refcnt, 1);

        return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
        struct cryptd_aead_ctx *ctx;
        ctx = crypto_aead_ctx(&tfm->base);
        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

        return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

        if (atomic_dec_and_test(&ctx->refcnt))
                crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

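/*
 * Module init: bring up the "cryptd" workqueue (CPU-intensive and
 * usable under memory reclaim), then the per-CPU queues, then the
 * template, unwinding in reverse order on failure.
 */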
static int __init cryptd_init(void)
{
        int err;

        cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
                                    1);
        if (!cryptd_wq)
                return -ENOMEM;

        err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
        if (err)
                goto err_destroy_wq;

        err = crypto_register_template(&cryptd_tmpl);
        if (err)
                goto err_fini_queue;

        return 0;

err_fini_queue:
        cryptd_fini_queue(&queue);
err_destroy_wq:
        destroy_workqueue(cryptd_wq);
        return err;
}

static void __exit cryptd_exit(void)
{
        destroy_workqueue(cryptd_wq);
        cryptd_fini_queue(&queue);
        crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");