linux/crypto/mcryptd.c
/*
 * Software multibuffer async crypto daemon.
 *
 * Copyright (c) 2014 Tim Chen <tim.c.chen@linux.intel.com>
 *
 * Adapted from crypto daemon.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hardirq.h>

#define MCRYPTD_MAX_CPU_QLEN 100
#define MCRYPTD_BATCH 9

static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
                                    unsigned int tail);

struct mcryptd_flush_list {
        struct list_head list;
        struct mutex lock;
};

static struct mcryptd_flush_list __percpu *mcryptd_flist;

struct hashd_instance_ctx {
        struct crypto_ahash_spawn spawn;
        struct mcryptd_queue *queue;
};

static void mcryptd_queue_worker(struct work_struct *work);

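/*
 * Arm the per-CPU flusher: if no flush is already pending for this per-CPU
 * algorithm state, put it on the current CPU's flush list and schedule its
 * delayed flush work to run after @delay jiffies.
 */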
void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay)
{
        struct mcryptd_flush_list *flist;

        if (!cstate->flusher_engaged) {
                /* put the flusher on the flush list */
                flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
                mutex_lock(&flist->lock);
                list_add_tail(&cstate->flush_list, &flist->list);
                cstate->flusher_engaged = true;
                cstate->next_flush = jiffies + delay;
                queue_delayed_work_on(smp_processor_id(), kcrypto_wq,
                        &cstate->flush, delay);
                mutex_unlock(&flist->lock);
        }
}
EXPORT_SYMBOL(mcryptd_arm_flusher);

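/*
 * Allocate and initialize one crypto request queue and work item per
 * possible CPU; each CPU dispatches its own requests from its own queue.
 */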
static int mcryptd_init_queue(struct mcryptd_queue *queue,
                              unsigned int max_cpu_qlen)
{
        int cpu;
        struct mcryptd_cpu_queue *cpu_queue;

        queue->cpu_queue = alloc_percpu(struct mcryptd_cpu_queue);
        pr_debug("mqueue:%p mcryptd_cpu_queue %p\n", queue, queue->cpu_queue);
        if (!queue->cpu_queue)
                return -ENOMEM;
        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                pr_debug("cpu_queue #%d %p\n", cpu, cpu_queue);
                crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
                INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
        }
        return 0;
}

static void mcryptd_fini_queue(struct mcryptd_queue *queue)
{
        int cpu;
        struct mcryptd_cpu_queue *cpu_queue;

        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                BUG_ON(cpu_queue->queue.qlen);
        }
        free_percpu(queue->cpu_queue);
}

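/*
 * Queue a request on the current CPU's queue and kick that CPU's work
 * item; the submitting CPU is also recorded in the request context tag.
 */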
static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
                                   struct crypto_async_request *request,
                                   struct mcryptd_hash_request_ctx *rctx)
{
        int cpu, err;
        struct mcryptd_cpu_queue *cpu_queue;

        cpu = get_cpu();
        cpu_queue = this_cpu_ptr(queue->cpu_queue);
        rctx->tag.cpu = cpu;

        err = crypto_enqueue_request(&cpu_queue->queue, request);
        pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
                 cpu, cpu_queue, request);
        queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
        put_cpu();

        return err;
}

/*
 * Try to opportunistically flush the partially completed jobs if
 * the crypto daemon is the only task running.
 */
static void mcryptd_opportunistic_flush(void)
{
        struct mcryptd_flush_list *flist;
        struct mcryptd_alg_cstate *cstate;

        flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
        while (single_task_running()) {
                mutex_lock(&flist->lock);
                cstate = list_first_entry_or_null(&flist->list,
                                struct mcryptd_alg_cstate, flush_list);
                if (!cstate || !cstate->flusher_engaged) {
                        mutex_unlock(&flist->lock);
                        return;
                }
                list_del(&cstate->flush_list);
                cstate->flusher_engaged = false;
                mutex_unlock(&flist->lock);
                cstate->alg_state->flusher(cstate);
        }
}

/*
 * Called in workqueue context: do the real crypto work (via
 * req->complete) for a batch of requests and reschedule itself if
 * there is more work to do.
 */
static void mcryptd_queue_worker(struct work_struct *work)
{
        struct mcryptd_cpu_queue *cpu_queue;
        struct crypto_async_request *req, *backlog;
        int i;

        /*
         * Need to loop through more than once for multi-buffer to
         * be effective.
         */

        cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
        i = 0;
        while (i < MCRYPTD_BATCH || single_task_running()) {
                /*
                 * preempt_disable/enable is used to prevent
                 * being preempted by mcryptd_enqueue_request()
                 */
                local_bh_disable();
                preempt_disable();
                backlog = crypto_get_backlog(&cpu_queue->queue);
                req = crypto_dequeue_request(&cpu_queue->queue);
                preempt_enable();
                local_bh_enable();

                if (!req) {
                        mcryptd_opportunistic_flush();
                        return;
                }

                if (backlog)
                        backlog->complete(backlog, -EINPROGRESS);
                req->complete(req, 0);
                if (!cpu_queue->queue.qlen)
                        return;
                ++i;
        }
        if (cpu_queue->queue.qlen)
                queue_work(kcrypto_wq, &cpu_queue->work);
}

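/*
 * Delayed-work handler: if this CPU's algorithm state still has a flush
 * pending, take it off the flush list and invoke the algorithm's flusher
 * to complete the partially filled multi-buffer jobs.
 */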
void mcryptd_flusher(struct work_struct *__work)
{
        struct  mcryptd_alg_cstate      *alg_cpu_state;
        struct  mcryptd_alg_state       *alg_state;
        struct  mcryptd_flush_list      *flist;
        int     cpu;

        cpu = smp_processor_id();
        alg_cpu_state = container_of(to_delayed_work(__work),
                                     struct mcryptd_alg_cstate, flush);
        alg_state = alg_cpu_state->alg_state;
        if (alg_cpu_state->cpu != cpu)
                pr_debug("mcryptd error: work on cpu %d, should be cpu %d\n",
                                cpu, alg_cpu_state->cpu);

        if (alg_cpu_state->flusher_engaged) {
                flist = per_cpu_ptr(mcryptd_flist, cpu);
                mutex_lock(&flist->lock);
                list_del(&alg_cpu_state->flush_list);
                alg_cpu_state->flusher_engaged = false;
                mutex_unlock(&flist->lock);
                alg_state->flusher(alg_cpu_state);
        }
}
EXPORT_SYMBOL_GPL(mcryptd_flusher);

static inline struct mcryptd_queue *mcryptd_get_queue(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct mcryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

        return ictx->queue;
}

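/*
 * Allocate a wrapping instance for @alg with @head bytes in front of the
 * crypto_instance and @tail bytes of context behind it, and fill in the
 * "mcryptd(...)" driver name plus the fields copied from the base algorithm.
 */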
static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
                                    unsigned int tail)
{
        char *p;
        struct crypto_instance *inst;
        int err;

        p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        inst = (void *)(p + head);

        err = -ENAMETOOLONG;
        if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                    "mcryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
                goto out_free_inst;

        memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

        inst->alg.cra_priority = alg->cra_priority + 50;
        inst->alg.cra_blocksize = alg->cra_blocksize;
        inst->alg.cra_alignmask = alg->cra_alignmask;

out:
        return p;

out_free_inst:
        kfree(p);
        p = ERR_PTR(err);
        goto out;
}

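/*
 * Propagate the CRYPTO_ALG_INTERNAL type/mask bits from the template
 * attributes so that mcryptd may wrap internal-only base algorithms.
 */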
static inline void mcryptd_check_internal(struct rtattr **tb, u32 *type,
                                          u32 *mask)
{
        struct crypto_attr_type *algt;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return;
        if ((algt->type & CRYPTO_ALG_INTERNAL))
                *type |= CRYPTO_ALG_INTERNAL;
        if ((algt->mask & CRYPTO_ALG_INTERNAL))
                *mask |= CRYPTO_ALG_INTERNAL;
}

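/*
 * Instantiate the underlying ahash ("child") for a new mcryptd hash tfm
 * and size the request context to hold both mcryptd's state and the
 * child's request.
 */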
static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
        struct crypto_ahash_spawn *spawn = &ictx->spawn;
        struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_ahash *hash;

        hash = crypto_spawn_ahash(spawn);
        if (IS_ERR(hash))
                return PTR_ERR(hash);

        ctx->child = hash;
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct mcryptd_hash_request_ctx) +
                                 crypto_ahash_reqsize(hash));
        return 0;
}

static void mcryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
        struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_ahash(ctx->child);
}

static int mcryptd_hash_setkey(struct crypto_ahash *parent,
                                   const u8 *key, unsigned int keylen)
{
        struct mcryptd_hash_ctx *ctx   = crypto_ahash_ctx(parent);
        struct crypto_ahash *child = ctx->child;
        int err;

        crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_ahash_set_flags(child, crypto_ahash_get_flags(parent) &
                                      CRYPTO_TFM_REQ_MASK);
        err = crypto_ahash_setkey(child, key, keylen);
        crypto_ahash_set_flags(parent, crypto_ahash_get_flags(child) &
                                       CRYPTO_TFM_RES_MASK);
        return err;
}

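/*
 * Common enqueue path for all hash operations: save the caller's
 * completion, substitute the mcryptd completion handler for this
 * operation, and queue the request on the current CPU.
 */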
static int mcryptd_hash_enqueue(struct ahash_request *req,
                                crypto_completion_t complete)
{
        int ret;

        struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct mcryptd_queue *queue =
                mcryptd_get_queue(crypto_ahash_tfm(tfm));

        rctx->complete = req->base.complete;
        req->base.complete = complete;

        ret = mcryptd_enqueue_request(queue, &req->base, rctx);

        return ret;
}

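/*
 * The mcryptd_hash_* callbacks below run from the per-CPU worker.  Each
 * hands its operation to the child ahash through the request embedded in
 * the mcryptd request context, and the saved completion is invoked either
 * directly here or later, once the multi-buffer job finally completes.
 */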
static void mcryptd_hash_init(struct crypto_async_request *req_async, int err)
{
        struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
        struct crypto_ahash *child = ctx->child;
        struct ahash_request *req = ahash_request_cast(req_async);
        struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct ahash_request *desc = &rctx->areq;

        if (unlikely(err == -EINPROGRESS))
                goto out;

        ahash_request_set_tfm(desc, child);
        ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
                                                rctx->complete, req_async);

        rctx->out = req->result;
        err = crypto_ahash_init(desc);

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int mcryptd_hash_init_enqueue(struct ahash_request *req)
{
        return mcryptd_hash_enqueue(req, mcryptd_hash_init);
}

static void mcryptd_hash_update(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        rctx->out = req->result;
        err = ahash_mcryptd_update(&rctx->areq);
        if (err) {
                req->base.complete = rctx->complete;
                goto out;
        }

        return;
out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int mcryptd_hash_update_enqueue(struct ahash_request *req)
{
        return mcryptd_hash_enqueue(req, mcryptd_hash_update);
}

static void mcryptd_hash_final(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        rctx->out = req->result;
        err = ahash_mcryptd_final(&rctx->areq);
        if (err) {
                req->base.complete = rctx->complete;
                goto out;
        }

        return;
out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int mcryptd_hash_final_enqueue(struct ahash_request *req)
{
        return mcryptd_hash_enqueue(req, mcryptd_hash_final);
}

static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;
        rctx->out = req->result;
        err = ahash_mcryptd_finup(&rctx->areq);

        if (err) {
                req->base.complete = rctx->complete;
                goto out;
        }

        return;
out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int mcryptd_hash_finup_enqueue(struct ahash_request *req)
{
        return mcryptd_hash_enqueue(req, mcryptd_hash_finup);
}

static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
        struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
        struct crypto_ahash *child = ctx->child;
        struct ahash_request *req = ahash_request_cast(req_async);
        struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct ahash_request *desc = &rctx->areq;

        if (unlikely(err == -EINPROGRESS))
                goto out;

        ahash_request_set_tfm(desc, child);
        ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
                                                rctx->complete, req_async);

        rctx->out = req->result;
        err = ahash_mcryptd_digest(desc);

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int mcryptd_hash_digest_enqueue(struct ahash_request *req)
{
        return mcryptd_hash_enqueue(req, mcryptd_hash_digest);
}

static int mcryptd_hash_export(struct ahash_request *req, void *out)
{
        struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        return crypto_ahash_export(&rctx->areq, out);
}

static int mcryptd_hash_import(struct ahash_request *req, const void *in)
{
        struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        return crypto_ahash_import(&rctx->areq, in);
}

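/*
 * Build and register an mcryptd ahash instance wrapping the hash algorithm
 * named by the template parameters, raising its priority above the base
 * algorithm and routing all operations through the per-CPU queues.
 */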
static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
                               struct mcryptd_queue *queue)
{
        struct hashd_instance_ctx *ctx;
        struct ahash_instance *inst;
        struct hash_alg_common *halg;
        struct crypto_alg *alg;
        u32 type = 0;
        u32 mask = 0;
        int err;

        mcryptd_check_internal(tb, &type, &mask);

        halg = ahash_attr_alg(tb[1], type, mask);
        if (IS_ERR(halg))
                return PTR_ERR(halg);

        alg = &halg->base;
        pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name);
        inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(),
                                        sizeof(*ctx));
        err = PTR_ERR(inst);
        if (IS_ERR(inst))
                goto out_put_alg;

        ctx = ahash_instance_ctx(inst);
        ctx->queue = queue;

        err = crypto_init_ahash_spawn(&ctx->spawn, halg,
                                      ahash_crypto_instance(inst));
        if (err)
                goto out_free_inst;

        type = CRYPTO_ALG_ASYNC;
        if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
                type |= CRYPTO_ALG_INTERNAL;
        inst->alg.halg.base.cra_flags = type;

        inst->alg.halg.digestsize = halg->digestsize;
        inst->alg.halg.statesize = halg->statesize;
        inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);

        inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm;
        inst->alg.halg.base.cra_exit = mcryptd_hash_exit_tfm;

        inst->alg.init   = mcryptd_hash_init_enqueue;
        inst->alg.update = mcryptd_hash_update_enqueue;
        inst->alg.final  = mcryptd_hash_final_enqueue;
        inst->alg.finup  = mcryptd_hash_finup_enqueue;
        inst->alg.export = mcryptd_hash_export;
        inst->alg.import = mcryptd_hash_import;
        inst->alg.setkey = mcryptd_hash_setkey;
        inst->alg.digest = mcryptd_hash_digest_enqueue;

        err = ahash_register_instance(tmpl, inst);
        if (err) {
                crypto_drop_ahash(&ctx->spawn);
out_free_inst:
                kfree(inst);
        }

out_put_alg:
        crypto_mod_put(alg);
        return err;
}

static struct mcryptd_queue mqueue;

static int mcryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
        struct crypto_attr_type *algt;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return PTR_ERR(algt);

        switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_DIGEST:
                return mcryptd_create_hash(tmpl, tb, &mqueue);
        break;
        }

        return -EINVAL;
}

static void mcryptd_free(struct crypto_instance *inst)
{
        struct mcryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
        struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);

        switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_AHASH:
                crypto_drop_ahash(&hctx->spawn);
                kfree(ahash_instance(inst));
                return;
        default:
                crypto_drop_spawn(&ctx->spawn);
                kfree(inst);
        }
}

static struct crypto_template mcryptd_tmpl = {
        .name = "mcryptd",
        .create = mcryptd_create,
        .free = mcryptd_free,
        .module = THIS_MODULE,
};

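/*
 * Allocate an mcryptd-wrapped ahash by name.  A minimal usage sketch
 * (illustrative only; the algorithm name below is just an example, not
 * taken from an in-tree caller):
 *
 *	struct mcryptd_ahash *tfm;
 *
 *	tfm = mcryptd_alloc_ahash("__sha256-mb", CRYPTO_ALG_INTERNAL,
 *				  CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	mcryptd_free_ahash(tfm);
 *
 * The allocation is rejected unless the resulting tfm was actually created
 * by this module's "mcryptd" template.
 */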
struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
                                        u32 type, u32 mask)
{
        char mcryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct crypto_ahash *tfm;

        if (snprintf(mcryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "mcryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        tfm = crypto_alloc_ahash(mcryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_ahash(tfm);
                return ERR_PTR(-EINVAL);
        }

        return __mcryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);

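/*
 * Thin wrappers used by the multi-buffer algorithms to drive the child
 * ahash; any alignment handling is left to the multi-buffer algorithm
 * itself.
 */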
int ahash_mcryptd_digest(struct ahash_request *desc)
{
        int err;

        err = crypto_ahash_init(desc) ?:
              ahash_mcryptd_finup(desc);

        return err;
}

int ahash_mcryptd_update(struct ahash_request *desc)
{
        /* alignment is to be done by multi-buffer crypto algorithm if needed */

        return crypto_ahash_update(desc);
}

int ahash_mcryptd_finup(struct ahash_request *desc)
{
        /* alignment is to be done by multi-buffer crypto algorithm if needed */

        return crypto_ahash_finup(desc);
}

int ahash_mcryptd_final(struct ahash_request *desc)
{
        /* alignment is to be done by multi-buffer crypto algorithm if needed */

        return crypto_ahash_final(desc);
}

struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
{
        struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

        return ctx->child;
}
EXPORT_SYMBOL_GPL(mcryptd_ahash_child);

struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req)
{
        struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        return &rctx->areq;
}
EXPORT_SYMBOL_GPL(mcryptd_ahash_desc);

void mcryptd_free_ahash(struct mcryptd_ahash *tfm)
{
        crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(mcryptd_free_ahash);

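/*
 * Module init: set up the per-CPU flush lists and request queues, then
 * register the "mcryptd" template, undoing the earlier steps if
 * registration fails.
 */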
static int __init mcryptd_init(void)
{
        int err, cpu;
        struct mcryptd_flush_list *flist;

        mcryptd_flist = alloc_percpu(struct mcryptd_flush_list);
        if (!mcryptd_flist)
                return -ENOMEM;
        for_each_possible_cpu(cpu) {
                flist = per_cpu_ptr(mcryptd_flist, cpu);
                INIT_LIST_HEAD(&flist->list);
                mutex_init(&flist->lock);
        }

        err = mcryptd_init_queue(&mqueue, MCRYPTD_MAX_CPU_QLEN);
        if (err) {
                free_percpu(mcryptd_flist);
                return err;
        }

        err = crypto_register_template(&mcryptd_tmpl);
        if (err) {
                mcryptd_fini_queue(&mqueue);
                free_percpu(mcryptd_flist);
        }

        return err;
}

static void __exit mcryptd_exit(void)
{
        mcryptd_fini_queue(&mqueue);
        crypto_unregister_template(&mcryptd_tmpl);
        free_percpu(mcryptd_flist);
}

subsys_initcall(mcryptd_init);
module_exit(mcryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async multibuffer crypto daemon");
MODULE_ALIAS_CRYPTO("mcryptd");