/*
 * linux/crypto/ablkcipher.c
 *
 * Asynchronous block chaining cipher operations.
 *
 * This is the asynchronous version of blkcipher.c, indicating completion
 * via a callback.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <crypto/internal/skcipher.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include <crypto/scatterwalk.h>

#include "internal.h"

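/*
 * One queued slow-path chunk: @data holds @len bytes of output that still
 * have to be copied to the caller's destination scatterlist at @dst once
 * the asynchronous operation has completed.
 */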
struct ablkcipher_buffer {
        struct list_head        entry;
        struct scatter_walk     dst;
        unsigned int            len;
        void                    *data;
};

enum {
        ABLKCIPHER_WALK_SLOW = 1 << 0,
};

static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
{
        scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
}

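/*
 * Flush every queued slow-path chunk to its final destination and free it.
 * Drivers call this (via ablkcipher_walk_complete()) once the asynchronous
 * operation has finished and the bounce buffers can be drained.
 */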
void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
        struct ablkcipher_buffer *p, *tmp;

        list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
                ablkcipher_buffer_write(p);
                list_del(&p->entry);
                kfree(p);
        }
}
EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);

static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
                                          struct ablkcipher_buffer *p)
{
        p->dst = walk->out;
        list_add_tail(&p->entry, &walk->buffers);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
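 * E.g. with PAGE_SIZE 4096, start == 0xff8 and len == 16 the chunk would
 * cross a page boundary, so 0x1000 is returned: the start of the page that
 * holds the chunk's final byte.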
 */
static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
{
        u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

        return max(start, end_page);
}

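/*
 * Slow-path completion: the data was processed in a bounce buffer, so there
 * is nothing to copy here; just advance the output walk, page by page, past
 * the bsize bytes that __ablkcipher_walk_complete() will fill in later.
 */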
static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
                                                unsigned int bsize)
{
        unsigned int n = bsize;

        for (;;) {
                unsigned int len_this_page = scatterwalk_pagelen(&walk->out);

                if (len_this_page > n)
                        len_this_page = n;
                scatterwalk_advance(&walk->out, n);
                if (n == len_this_page)
                        break;
                n -= len_this_page;
                scatterwalk_start(&walk->out,
                                  scatterwalk_sg_next(walk->out.sg));
        }

        return bsize;
}

static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
                                                unsigned int n)
{
        scatterwalk_advance(&walk->in, n);
        scatterwalk_advance(&walk->out, n);

        return n;
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
                                struct ablkcipher_walk *walk);

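/*
 * Finish the current walk step.  @err is either a negative error code or
 * the number of bytes at the end of the step that the caller did not
 * process.  The walk is advanced past the bytes that were processed and,
 * if anything is left to do, the next step is set up.  Returns zero once
 * the walk has completed successfully, a negative error code otherwise.
 */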
int ablkcipher_walk_done(struct ablkcipher_request *req,
                         struct ablkcipher_walk *walk, int err)
{
        struct crypto_tfm *tfm = req->base.tfm;
        unsigned int nbytes = 0;

        if (likely(err >= 0)) {
                unsigned int n = walk->nbytes - err;

                if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
                        n = ablkcipher_done_fast(walk, n);
                else if (WARN_ON(err)) {
                        err = -EINVAL;
                        goto err;
                } else
                        n = ablkcipher_done_slow(walk, n);

                nbytes = walk->total - n;
                err = 0;
        }

        scatterwalk_done(&walk->in, 0, nbytes);
        scatterwalk_done(&walk->out, 1, nbytes);

err:
        walk->total = nbytes;
        walk->nbytes = nbytes;

        if (nbytes) {
                crypto_yield(req->base.flags);
                return ablkcipher_walk_next(req, walk);
        }

        if (walk->iv != req->info)
                memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
        kfree(walk->iv_buffer);

        return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);

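/*
 * Slow path: the current chunk is misaligned or straddles a page, so it is
 * processed through an aligned bounce buffer.  The input is copied into the
 * buffer here, and the buffer is queued so that the output can be copied
 * back by __ablkcipher_walk_complete() when the request finishes.
 */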
static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
                                       struct ablkcipher_walk *walk,
                                       unsigned int bsize,
                                       unsigned int alignmask,
                                       void **src_p, void **dst_p)
{
        unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
        struct ablkcipher_buffer *p;
        void *src, *dst, *base;
        unsigned int n;

        n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
        n += (aligned_bsize * 3 - (alignmask + 1) +
              (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));

        p = kmalloc(n, GFP_ATOMIC);
        if (!p)
                return ablkcipher_walk_done(req, walk, -ENOMEM);

        base = p + 1;

        dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
        src = dst = ablkcipher_get_spot(dst, bsize);

        p->len = bsize;
        p->data = dst;

        scatterwalk_copychunks(src, &walk->in, bsize, 0);

        ablkcipher_queue_write(walk, p);

        walk->nbytes = bsize;
        walk->flags |= ABLKCIPHER_WALK_SLOW;

        *src_p = src;
        *dst_p = dst;

        return 0;
}

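/*
 * The IV supplied by the caller is not aligned to the algorithm's alignment
 * mask, so work on an aligned copy in a freshly allocated buffer instead.
 * walk->iv_buffer is freed again by ablkcipher_walk_done().
 */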
static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
                                     struct crypto_tfm *tfm,
                                     unsigned int alignmask)
{
        unsigned bs = walk->blocksize;
        unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
        unsigned aligned_bs = ALIGN(bs, alignmask + 1);
        unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
                            (alignmask + 1);
        u8 *iv;

        size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
        walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
        if (!walk->iv_buffer)
                return -ENOMEM;

        iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
        iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
        iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
        iv = ablkcipher_get_spot(iv, ivsize);

        walk->iv = memcpy(iv, walk->iv, ivsize);
        return 0;
}

static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
                                       struct ablkcipher_walk *walk)
{
        walk->src.page = scatterwalk_page(&walk->in);
        walk->src.offset = offset_in_page(walk->in.offset);
        walk->dst.page = scatterwalk_page(&walk->out);
        walk->dst.offset = offset_in_page(walk->out.offset);

        return 0;
}

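/*
 * Set up the next walk step: clamp it to what is contiguous in both
 * scatterlists, then take the fast (in-place) path if source and
 * destination are suitably aligned, and the bounce-buffer slow path
 * otherwise.
 */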
static int ablkcipher_walk_next(struct ablkcipher_request *req,
                                struct ablkcipher_walk *walk)
{
        struct crypto_tfm *tfm = req->base.tfm;
        unsigned int alignmask, bsize, n;
        void *src, *dst;
        int err;

        alignmask = crypto_tfm_alg_alignmask(tfm);
        n = walk->total;
        if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
                req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
                return ablkcipher_walk_done(req, walk, -EINVAL);
        }

        walk->flags &= ~ABLKCIPHER_WALK_SLOW;
        src = dst = NULL;

        bsize = min(walk->blocksize, n);
        n = scatterwalk_clamp(&walk->in, n);
        n = scatterwalk_clamp(&walk->out, n);

        if (n < bsize ||
            !scatterwalk_aligned(&walk->in, alignmask) ||
            !scatterwalk_aligned(&walk->out, alignmask)) {
                err = ablkcipher_next_slow(req, walk, bsize, alignmask,
                                           &src, &dst);
                goto set_phys_lowmem;
        }

        walk->nbytes = n;

        return ablkcipher_next_fast(req, walk);

set_phys_lowmem:
        if (err >= 0) {
                walk->src.page = virt_to_page(src);
                walk->dst.page = virt_to_page(dst);
                walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
                walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
        }

        return err;
}

static int ablkcipher_walk_first(struct ablkcipher_request *req,
                                 struct ablkcipher_walk *walk)
{
        struct crypto_tfm *tfm = req->base.tfm;
        unsigned int alignmask;

        alignmask = crypto_tfm_alg_alignmask(tfm);
        if (WARN_ON_ONCE(in_irq()))
                return -EDEADLK;

        walk->nbytes = walk->total;
        if (unlikely(!walk->total))
                return 0;

        walk->iv_buffer = NULL;
        walk->iv = req->info;
        if (unlikely(((unsigned long)walk->iv & alignmask))) {
                int err = ablkcipher_copy_iv(walk, tfm, alignmask);

                if (err)
                        return err;
        }

        scatterwalk_start(&walk->in, walk->in.sg);
        scatterwalk_start(&walk->out, walk->out.sg);

        return ablkcipher_walk_next(req, walk);
}

int ablkcipher_walk_phys(struct ablkcipher_request *req,
                         struct ablkcipher_walk *walk)
{
        walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
        return ablkcipher_walk_first(req, walk);
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);
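
/*
 * Illustrative sketch (not part of the original file): a driver using the
 * physical-address walk typically loops like this, processing up to
 * walk.nbytes per step and reporting any unprocessed remainder back to
 * ablkcipher_walk_done().  process_chunk() is a hypothetical helper that
 * returns the number of bytes it could not handle (or a negative errno);
 * ablkcipher_walk_init() and ablkcipher_walk_complete() come from
 * crypto/internal/skcipher.h.
 *
 *	struct ablkcipher_walk walk;
 *	unsigned int n;
 *	int err;
 *
 *	ablkcipher_walk_init(&walk, req->dst, req->src, req->nbytes);
 *	err = ablkcipher_walk_phys(req, &walk);
 *
 *	while ((n = walk.nbytes)) {
 *		(walk.src.page/offset and walk.dst.page/offset now describe
 *		 one contiguous, alignment-safe chunk of n bytes)
 *		err = process_chunk(&walk, n);
 *		err = ablkcipher_walk_done(req, &walk, err);
 *	}
 *
 *	Later, once the asynchronous operation has really finished:
 *	ablkcipher_walk_complete(&walk);
 */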
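
/*
 * The key is not aligned to the algorithm's alignment mask, so pass it on
 * via an aligned bounce buffer.  The copy is zeroed again before the buffer
 * is freed so that no key material is left lying around.
 */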
static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
        unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
        int ret;
        u8 *buffer, *alignbuffer;
        unsigned long absize;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_ATOMIC);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = cipher->setkey(tfm, alignbuffer, keylen);
        memset(alignbuffer, 0, keylen);
        kfree(buffer);
        return ret;
}

static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                  unsigned int keylen)
{
        struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
        unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);

        if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
                crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        if ((unsigned long)key & alignmask)
                return setkey_unaligned(tfm, key, keylen);

        return cipher->setkey(tfm, key, keylen);
}

static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
                                              u32 mask)
{
        return alg->cra_ctxsize;
}

int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req)
{
        return crypto_ablkcipher_encrypt(&req->creq);
}

int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req)
{
        return crypto_ablkcipher_decrypt(&req->creq);
}

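/*
 * Initialize the crt_ablkcipher vtable of a plain ablkcipher.  IV sizes
 * above PAGE_SIZE / 8 are rejected as a sanity limit; algorithms without
 * an IV get no-op "generators" that simply run the underlying operation.
 */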
static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
                                      u32 mask)
{
        struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
        struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

        if (alg->ivsize > PAGE_SIZE / 8)
                return -EINVAL;

        crt->setkey = setkey;
        crt->encrypt = alg->encrypt;
        crt->decrypt = alg->decrypt;
        if (!alg->ivsize) {
                crt->givencrypt = skcipher_null_givencrypt;
                crt->givdecrypt = skcipher_null_givdecrypt;
        }
        crt->base = __crypto_ablkcipher_cast(tfm);
        crt->ivsize = alg->ivsize;

        return 0;
}

#ifdef CONFIG_NET
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_blkcipher rblkcipher;

        strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
        strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
                sizeof(rblkcipher.geniv));

        rblkcipher.blocksize = alg->cra_blocksize;
        rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
        rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
        rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

        if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
                    sizeof(struct crypto_report_blkcipher), &rblkcipher))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
#else
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif

static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
        __attribute__ ((unused));
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
        struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

        seq_printf(m, "type         : ablkcipher\n");
        seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                                             "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
        seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
        seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
        seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<default>");
}

const struct crypto_type crypto_ablkcipher_type = {
        .ctxsize = crypto_ablkcipher_ctxsize,
        .init = crypto_init_ablkcipher_ops,
#ifdef CONFIG_PROC_FS
        .show = crypto_ablkcipher_show,
#endif
        .report = crypto_ablkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);

static int no_givdecrypt(struct skcipher_givcrypt_request *req)
{
        return -ENOSYS;
}

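/*
 * Initialize the vtable of a givcipher, i.e. an ablkcipher with a built-in
 * IV generator.  For a default geniv instance (CRYPTO_ALG_GENIV) the
 * algorithm's own setkey already delegates to the inner cipher and is used
 * directly; otherwise the range-checking setkey() wrapper is installed.
 */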
static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
                                     u32 mask)
{
        struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
        struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

        if (alg->ivsize > PAGE_SIZE / 8)
                return -EINVAL;

        crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ?
                      alg->setkey : setkey;
        crt->encrypt = alg->encrypt;
        crt->decrypt = alg->decrypt;
        crt->givencrypt = alg->givencrypt;
        crt->givdecrypt = alg->givdecrypt ?: no_givdecrypt;
        crt->base = __crypto_ablkcipher_cast(tfm);
        crt->ivsize = alg->ivsize;

        return 0;
}

#ifdef CONFIG_NET
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_blkcipher rblkcipher;

        strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
        strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
                sizeof(rblkcipher.geniv));

        rblkcipher.blocksize = alg->cra_blocksize;
        rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
        rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
        rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

        if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
                    sizeof(struct crypto_report_blkcipher), &rblkcipher))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
#else
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif

static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
        __attribute__ ((unused));
static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
        struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

        seq_printf(m, "type         : givcipher\n");
        seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                                             "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
        seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
        seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
        seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<built-in>");
}

const struct crypto_type crypto_givcipher_type = {
        .ctxsize = crypto_ablkcipher_ctxsize,
        .init = crypto_init_givcipher_ops,
#ifdef CONFIG_PROC_FS
        .show = crypto_givcipher_show,
#endif
        .report = crypto_givcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_givcipher_type);

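/*
 * Pick the default IV generator for an algorithm.  eseqiv encrypts a
 * sequence number into a full cipher block and therefore requires the IV
 * size to equal the block size; anything else falls back to chainiv.
 */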
const char *crypto_default_geniv(const struct crypto_alg *alg)
{
        if (((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
             CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
                                         alg->cra_ablkcipher.ivsize) !=
            alg->cra_blocksize)
                return "chainiv";

        return "eseqiv";
}

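/*
 * Construct a default givcipher instance around @alg: build the template
 * parameters by hand, instantiate the chosen geniv template under the
 * algorithm's own driver name, and return -EAGAIN so that the caller's
 * lookup is retried and picks up the freshly registered instance.
 */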
static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
{
        struct rtattr *tb[3];
        struct {
                struct rtattr attr;
                struct crypto_attr_type data;
        } ptype;
        struct {
                struct rtattr attr;
                struct crypto_attr_alg data;
        } palg;
        struct crypto_template *tmpl;
        struct crypto_instance *inst;
        struct crypto_alg *larval;
        const char *geniv;
        int err;

        larval = crypto_larval_lookup(alg->cra_driver_name,
                                      (type & ~CRYPTO_ALG_TYPE_MASK) |
                                      CRYPTO_ALG_TYPE_GIVCIPHER,
                                      mask | CRYPTO_ALG_TYPE_MASK);
        err = PTR_ERR(larval);
        if (IS_ERR(larval))
                goto out;

        err = -EAGAIN;
        if (!crypto_is_larval(larval))
                goto drop_larval;

        ptype.attr.rta_len = sizeof(ptype);
        ptype.attr.rta_type = CRYPTOA_TYPE;
        ptype.data.type = type | CRYPTO_ALG_GENIV;
        /* GENIV tells the template that we're making a default geniv. */
        ptype.data.mask = mask | CRYPTO_ALG_GENIV;
        tb[0] = &ptype.attr;

        palg.attr.rta_len = sizeof(palg);
        palg.attr.rta_type = CRYPTOA_ALG;
        /* Must use the exact name to locate ourselves. */
        memcpy(palg.data.name, alg->cra_driver_name, CRYPTO_MAX_ALG_NAME);
        tb[1] = &palg.attr;

        tb[2] = NULL;

        if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
            CRYPTO_ALG_TYPE_BLKCIPHER)
                geniv = alg->cra_blkcipher.geniv;
        else
                geniv = alg->cra_ablkcipher.geniv;

        if (!geniv)
                geniv = crypto_default_geniv(alg);

        tmpl = crypto_lookup_template(geniv);
        err = -ENOENT;
        if (!tmpl)
                goto kill_larval;

        inst = tmpl->alloc(tb);
        err = PTR_ERR(inst);
        if (IS_ERR(inst))
                goto put_tmpl;

        err = crypto_register_instance(tmpl, inst);
        if (err) {
                tmpl->free(inst);
                goto put_tmpl;
        }

        /* Redo the lookup to use the instance we just registered. */
        err = -EAGAIN;

put_tmpl:
        crypto_tmpl_put(tmpl);
kill_larval:
        crypto_larval_kill(larval);
drop_larval:
        crypto_mod_put(larval);
out:
        crypto_mod_put(alg);
        return err;
}

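/*
 * Look up a symmetric cipher by name.  Algorithms that are already
 * givciphers, or that take no IV at all, are returned as found; otherwise
 * the lookup is redone insisting on a tested algorithm, and a default
 * givcipher instance is created for it on demand.
 */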
struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask)
{
        struct crypto_alg *alg;

        alg = crypto_alg_mod_lookup(name, type, mask);
        if (IS_ERR(alg))
                return alg;

        if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
            CRYPTO_ALG_TYPE_GIVCIPHER)
                return alg;

        if (!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
              CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
                                          alg->cra_ablkcipher.ivsize))
                return alg;

        crypto_mod_put(alg);
        alg = crypto_alg_mod_lookup(name, type | CRYPTO_ALG_TESTED,
                                    mask & ~CRYPTO_ALG_TESTED);
        if (IS_ERR(alg))
                return alg;

        if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
            CRYPTO_ALG_TYPE_GIVCIPHER) {
                if ((alg->cra_flags ^ type ^ ~mask) & CRYPTO_ALG_TESTED) {
                        crypto_mod_put(alg);
                        alg = ERR_PTR(-ENOENT);
                }
                return alg;
        }

        BUG_ON(!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
                 CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
                                             alg->cra_ablkcipher.ivsize));

        return ERR_PTR(crypto_givcipher_default(alg, type, mask));
}
EXPORT_SYMBOL_GPL(crypto_lookup_skcipher);

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
                         u32 type, u32 mask)
{
        struct crypto_alg *alg;
        int err;

        type = crypto_skcipher_type(type);
        mask = crypto_skcipher_mask(mask);

        alg = crypto_lookup_skcipher(name, type, mask);
        if (IS_ERR(alg))
                return PTR_ERR(alg);

        err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
        crypto_mod_put(alg);
        return err;
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

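/*
 * Allocate an ablkcipher handle by name.  The loop retries while the
 * lookup reports -EAGAIN, which crypto_givcipher_default() uses to signal
 * that a default IV-generator instance has just been registered; it gives
 * up on any other error or on a pending signal.
 */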
struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
                                                  u32 type, u32 mask)
{
        struct crypto_tfm *tfm;
        int err;

        type = crypto_skcipher_type(type);
        mask = crypto_skcipher_mask(mask);

        for (;;) {
                struct crypto_alg *alg;

                alg = crypto_lookup_skcipher(alg_name, type, mask);
                if (IS_ERR(alg)) {
                        err = PTR_ERR(alg);
                        goto err;
                }

                tfm = __crypto_alloc_tfm(alg, type, mask);
                if (!IS_ERR(tfm))
                        return __crypto_ablkcipher_cast(tfm);

                crypto_mod_put(alg);
                err = PTR_ERR(tfm);

err:
                if (err != -EAGAIN)
                        break;
                if (signal_pending(current)) {
                        err = -EINTR;
                        break;
                }
        }

        return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher);
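
/*
 * Illustrative sketch (not part of the original file): how a caller would
 * typically drive an ablkcipher asynchronously, which is the point of the
 * callback-based design described at the top of this file.  Only the public
 * API of this file and of include/linux/crypto.h is used; "key", "keylen",
 * "iv", "sg_src", "sg_dst" and "nbytes" are hypothetical placeholders.
 *
 *	static void my_cb(struct crypto_async_request *areq, int err)
 *	{
 *		if (err == -EINPROGRESS)
 *			return;		(backlogged request started, not done)
 *		complete(areq->data);	(wake the sleeping submitter)
 *	}
 *
 *	struct crypto_ablkcipher *tfm;
 *	struct ablkcipher_request *req;
 *	DECLARE_COMPLETION_ONSTACK(done);
 *	int err;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	err = crypto_ablkcipher_setkey(tfm, key, keylen);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *					my_cb, &done);
 *	ablkcipher_request_set_crypt(req, sg_src, sg_dst, nbytes, iv);
 *
 *	err = crypto_ablkcipher_encrypt(req);
 *	if (err == -EINPROGRESS || err == -EBUSY) {
 *		wait_for_completion(&done);
 *		(the definitive status was passed to my_cb as err)
 *	}
 *
 *	ablkcipher_request_free(req);
 *	crypto_free_ablkcipher(tfm);
 */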