linux/crypto/ablkcipher.c
/*
 * Asynchronous block chaining cipher operations.
 *
 * This is the asynchronous version of blkcipher.c, indicating completion
 * via a callback.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/skcipher.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>

#include <crypto/scatterwalk.h>

#include "internal.h"

static const char *skcipher_default_geniv __read_mostly;

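/*
 * A chunk that cannot be processed in place is bounced through a buffer
 * described by this structure and queued on walk->buffers; the data is
 * copied out to its real destination (dst) once the walk completes.
 */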
struct ablkcipher_buffer {
        struct list_head        entry;
        struct scatter_walk     dst;
        unsigned int            len;
        void                    *data;
};

enum {
        ABLKCIPHER_WALK_SLOW = 1 << 0,
};

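/* Flush one queued bounce buffer to its destination scatterlist. */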
static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
{
        scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
}

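/*
 * Write out and free every bounce buffer still queued on the walk.
 * Called once all slow-path output has been produced.
 */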
void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
        struct ablkcipher_buffer *p, *tmp;

        list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
                ablkcipher_buffer_write(p);
                list_del(&p->entry);
                kfree(p);
        }
}
EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);

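/* Queue a bounce buffer for writeback at the current output position. */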
static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
                                          struct ablkcipher_buffer *p)
{
        p->dst = walk->out;
        list_add_tail(&p->entry, &walk->buffers);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
{
        u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
        return max(start, end_page);
}

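/*
 * Slow-path completion: the input was already consumed when the block
 * was bounced, so only walk->out needs advancing here; the actual data
 * is written back later via __ablkcipher_walk_complete().
 */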
static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
                                                unsigned int bsize)
{
        unsigned int n = bsize;

        for (;;) {
                unsigned int len_this_page = scatterwalk_pagelen(&walk->out);

                if (len_this_page > n)
                        len_this_page = n;
                scatterwalk_advance(&walk->out, n);
                if (n == len_this_page)
                        break;
                n -= len_this_page;
                scatterwalk_start(&walk->out, scatterwalk_sg_next(walk->out.sg));
        }

        return bsize;
}

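/* Fast-path completion: data was processed in place, advance both sides. */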
static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
                                                unsigned int n)
{
        scatterwalk_advance(&walk->in, n);
        scatterwalk_advance(&walk->out, n);

        return n;
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
                                struct ablkcipher_walk *walk);

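/*
 * Complete the current step of the walk.  A non-negative @err is the
 * number of bytes the caller left unprocessed; on the slow path this
 * must be zero, since a bounce-buffered block cannot be partially
 * consumed.  Advances to the next chunk if any bytes remain, otherwise
 * copies a bounced IV back into the request and frees it.
 */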
int ablkcipher_walk_done(struct ablkcipher_request *req,
                         struct ablkcipher_walk *walk, int err)
{
        struct crypto_tfm *tfm = req->base.tfm;
        unsigned int nbytes = 0;

        if (likely(err >= 0)) {
                unsigned int n = walk->nbytes - err;

                if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
                        n = ablkcipher_done_fast(walk, n);
                else if (WARN_ON(err)) {
                        err = -EINVAL;
                        goto err;
                } else
                        n = ablkcipher_done_slow(walk, n);

                nbytes = walk->total - n;
                err = 0;
        }

        scatterwalk_done(&walk->in, 0, nbytes);
        scatterwalk_done(&walk->out, 1, nbytes);

err:
        walk->total = nbytes;
        walk->nbytes = nbytes;

        if (nbytes) {
                crypto_yield(req->base.flags);
                return ablkcipher_walk_next(req, walk);
        }

        if (walk->iv != req->info)
                memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
        kfree(walk->iv_buffer);

        return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);

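/*
 * Slow path: the next block is misaligned or straddles a page, so
 * bounce it through a kmalloc'd buffer.  The input is copied into the
 * buffer now; the output is queued and written back to the destination
 * scatterlist when the walk completes.  The allocation is oversized so
 * that an aligned, non-page-straddling block always fits.
 */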
static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
                                       struct ablkcipher_walk *walk,
                                       unsigned int bsize,
                                       unsigned int alignmask,
                                       void **src_p, void **dst_p)
{
        unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
        struct ablkcipher_buffer *p;
        void *src, *dst, *base;
        unsigned int n;

        n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
        n += (aligned_bsize * 3 - (alignmask + 1) +
              (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));

        p = kmalloc(n, GFP_ATOMIC);
        if (!p)
                return ablkcipher_walk_done(req, walk, -ENOMEM);

        base = p + 1;

        dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
        src = dst = ablkcipher_get_spot(dst, bsize);

        p->len = bsize;
        p->data = dst;

        scatterwalk_copychunks(src, &walk->in, bsize, 0);

        ablkcipher_queue_write(walk, p);

        walk->nbytes = bsize;
        walk->flags |= ABLKCIPHER_WALK_SLOW;

        *src_p = src;
        *dst_p = dst;

        return 0;
}

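/*
 * Bounce a misaligned IV into a kmalloc'd buffer that satisfies the
 * algorithm's alignmask and does not straddle a page.
 */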
static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
                                     struct crypto_tfm *tfm,
                                     unsigned int alignmask)
{
        unsigned bs = walk->blocksize;
        unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
        unsigned aligned_bs = ALIGN(bs, alignmask + 1);
        unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
                            (alignmask + 1);
        u8 *iv;

        size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
        walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
        if (!walk->iv_buffer)
                return -ENOMEM;

        iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
        iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
        iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
        iv = ablkcipher_get_spot(iv, ivsize);

        walk->iv = memcpy(iv, walk->iv, ivsize);
        return 0;
}

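/* Fast path: hand the current pages of both walks straight to the caller. */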
static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
                                       struct ablkcipher_walk *walk)
{
        walk->src.page = scatterwalk_page(&walk->in);
        walk->src.offset = offset_in_page(walk->in.offset);
        walk->dst.page = scatterwalk_page(&walk->out);
        walk->dst.offset = offset_in_page(walk->out.offset);

        return 0;
}

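/*
 * Set up the next chunk of the walk.  Rejects totals smaller than the
 * block size, then takes the fast path if both scatterlists are
 * suitably aligned and a full block fits in the current pages; a
 * bounce buffer is used otherwise.
 */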
static int ablkcipher_walk_next(struct ablkcipher_request *req,
                                struct ablkcipher_walk *walk)
{
        struct crypto_tfm *tfm = req->base.tfm;
        unsigned int alignmask, bsize, n;
        void *src, *dst;
        int err;

        alignmask = crypto_tfm_alg_alignmask(tfm);
        n = walk->total;
        if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
                req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
                return ablkcipher_walk_done(req, walk, -EINVAL);
        }

        walk->flags &= ~ABLKCIPHER_WALK_SLOW;
        src = dst = NULL;

        bsize = min(walk->blocksize, n);
        n = scatterwalk_clamp(&walk->in, n);
        n = scatterwalk_clamp(&walk->out, n);

        if (n < bsize ||
            !scatterwalk_aligned(&walk->in, alignmask) ||
            !scatterwalk_aligned(&walk->out, alignmask)) {
                err = ablkcipher_next_slow(req, walk, bsize, alignmask,
                                           &src, &dst);
                goto set_phys_lowmem;
        }

        walk->nbytes = n;

        return ablkcipher_next_fast(req, walk);

set_phys_lowmem:
        if (err >= 0) {
                walk->src.page = virt_to_page(src);
                walk->dst.page = virt_to_page(dst);
                walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
                walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
        }

        return err;
}

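/*
 * Begin a walk: record the IV (bouncing it if misaligned), start both
 * scatterwalks, and set up the first chunk.  Must not be called from
 * hard interrupt context.
 */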
static int ablkcipher_walk_first(struct ablkcipher_request *req,
                                 struct ablkcipher_walk *walk)
{
        struct crypto_tfm *tfm = req->base.tfm;
        unsigned int alignmask;

        alignmask = crypto_tfm_alg_alignmask(tfm);
        if (WARN_ON_ONCE(in_irq()))
                return -EDEADLK;

        walk->nbytes = walk->total;
        if (unlikely(!walk->total))
                return 0;

        walk->iv_buffer = NULL;
        walk->iv = req->info;
        if (unlikely(((unsigned long)walk->iv & alignmask))) {
                int err = ablkcipher_copy_iv(walk, tfm, alignmask);
                if (err)
                        return err;
        }

        scatterwalk_start(&walk->in, walk->in.sg);
        scatterwalk_start(&walk->out, walk->out.sg);

        return ablkcipher_walk_next(req, walk);
}

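/*
 * Walk the request's source and destination scatterlists, yielding
 * page/offset pairs (hence "phys": callers map the pages themselves).
 * A hypothetical driver loop, for illustration only
 * (ablkcipher_walk_init() is declared in <crypto/algapi.h>):
 *
 *      struct ablkcipher_walk walk;
 *      int err;
 *
 *      ablkcipher_walk_init(&walk, req->dst, req->src, req->nbytes);
 *      err = ablkcipher_walk_phys(req, &walk);
 *      while (!err && walk.nbytes) {
 *              // process walk.nbytes bytes described by walk.src/walk.dst,
 *              // then pass the number of bytes left unprocessed:
 *              err = ablkcipher_walk_done(req, &walk, 0);
 *      }
 */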
int ablkcipher_walk_phys(struct ablkcipher_request *req,
                         struct ablkcipher_walk *walk)
{
        walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
        return ablkcipher_walk_first(req, walk);
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);

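/*
 * The caller's key buffer is not aligned to the algorithm's alignmask:
 * copy the key into an aligned scratch buffer first, and wipe the copy
 * afterwards.
 */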
static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
        unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
        int ret;
        u8 *buffer, *alignbuffer;
        unsigned long absize;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_ATOMIC);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = cipher->setkey(tfm, alignbuffer, keylen);
        memset(alignbuffer, 0, keylen);
        kfree(buffer);
        return ret;
}

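/* Check the key length, then dispatch to the algorithm's setkey. */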
static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                  unsigned int keylen)
{
        struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
        unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);

        if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
                crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        if ((unsigned long)key & alignmask)
                return setkey_unaligned(tfm, key, keylen);

        return cipher->setkey(tfm, key, keylen);
}

static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
                                              u32 mask)
{
        return alg->cra_ctxsize;
}

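/*
 * Trivial "IV generators" for algorithms with no IV at all: simply run
 * the underlying encrypt/decrypt on the embedded request.
 */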
int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req)
{
        return crypto_ablkcipher_encrypt(&req->creq);
}

int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req)
{
        return crypto_ablkcipher_decrypt(&req->creq);
}

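/* Set up the ablkcipher method table when a transform is created. */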
static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
                                      u32 mask)
{
        struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
        struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

        if (alg->ivsize > PAGE_SIZE / 8)
                return -EINVAL;

        crt->setkey = setkey;
        crt->encrypt = alg->encrypt;
        crt->decrypt = alg->decrypt;
        if (!alg->ivsize) {
                crt->givencrypt = skcipher_null_givencrypt;
                crt->givdecrypt = skcipher_null_givdecrypt;
        }
        crt->base = __crypto_ablkcipher_cast(tfm);
        crt->ivsize = alg->ivsize;

        return 0;
}

static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
        __attribute__ ((unused));
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
        struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

        seq_printf(m, "type         : ablkcipher\n");
        seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                                             "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
        seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
        seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
        seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<default>");
}

const struct crypto_type crypto_ablkcipher_type = {
        .ctxsize = crypto_ablkcipher_ctxsize,
        .init = crypto_init_ablkcipher_ops,
#ifdef CONFIG_PROC_FS
        .show = crypto_ablkcipher_show,
#endif
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);

static int no_givdecrypt(struct skcipher_givcrypt_request *req)
{
        return -ENOSYS;
}

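/*
 * Method-table setup for IV-generator (givcipher) transforms.  A GENIV
 * algorithm provides its own setkey and givencrypt/givdecrypt hooks.
 */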
static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
                                     u32 mask)
{
        struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
        struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

        if (alg->ivsize > PAGE_SIZE / 8)
                return -EINVAL;

        crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ?
                      alg->setkey : setkey;
        crt->encrypt = alg->encrypt;
        crt->decrypt = alg->decrypt;
        crt->givencrypt = alg->givencrypt;
        crt->givdecrypt = alg->givdecrypt ?: no_givdecrypt;
        crt->base = __crypto_ablkcipher_cast(tfm);
        crt->ivsize = alg->ivsize;

        return 0;
}

static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
        __attribute__ ((unused));
static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
        struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

        seq_printf(m, "type         : givcipher\n");
        seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                                             "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
        seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
        seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
        seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<built-in>");
}

const struct crypto_type crypto_givcipher_type = {
        .ctxsize = crypto_ablkcipher_ctxsize,
        .init = crypto_init_givcipher_ops,
#ifdef CONFIG_PROC_FS
        .show = crypto_givcipher_show,
#endif
};
EXPORT_SYMBOL_GPL(crypto_givcipher_type);

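/*
 * Select the default IV generator for an algorithm: "chainiv" when the
 * IV size differs from the block size, "eseqiv" for async algorithms,
 * and otherwise the CPU-count-dependent default chosen at init time
 * (see skcipher_module_init() below).
 */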
const char *crypto_default_geniv(const struct crypto_alg *alg)
{
        if (((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
             CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
                                         alg->cra_ablkcipher.ivsize) !=
            alg->cra_blocksize)
                return "chainiv";

        return alg->cra_flags & CRYPTO_ALG_ASYNC ?
               "eseqiv" : skcipher_default_geniv;
}

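/*
 * Build and register the default IV-generator instance around @alg.  A
 * larval placeholder makes concurrent lookups wait for us; if someone
 * else has already created the instance, or once we have registered
 * ours, -EAGAIN tells the caller to redo the lookup.
 */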
static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
{
        struct rtattr *tb[3];
        struct {
                struct rtattr attr;
                struct crypto_attr_type data;
        } ptype;
        struct {
                struct rtattr attr;
                struct crypto_attr_alg data;
        } palg;
        struct crypto_template *tmpl;
        struct crypto_instance *inst;
        struct crypto_alg *larval;
        const char *geniv;
        int err;

        larval = crypto_larval_lookup(alg->cra_driver_name,
                                      (type & ~CRYPTO_ALG_TYPE_MASK) |
                                      CRYPTO_ALG_TYPE_GIVCIPHER,
                                      mask | CRYPTO_ALG_TYPE_MASK);
        err = PTR_ERR(larval);
        if (IS_ERR(larval))
                goto out;

        err = -EAGAIN;
        if (!crypto_is_larval(larval))
                goto drop_larval;

        ptype.attr.rta_len = sizeof(ptype);
        ptype.attr.rta_type = CRYPTOA_TYPE;
        ptype.data.type = type | CRYPTO_ALG_GENIV;
        /* GENIV tells the template that we're making a default geniv. */
        ptype.data.mask = mask | CRYPTO_ALG_GENIV;
        tb[0] = &ptype.attr;

        palg.attr.rta_len = sizeof(palg);
        palg.attr.rta_type = CRYPTOA_ALG;
        /* Must use the exact name to locate ourselves. */
        memcpy(palg.data.name, alg->cra_driver_name, CRYPTO_MAX_ALG_NAME);
        tb[1] = &palg.attr;

        tb[2] = NULL;

        if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
            CRYPTO_ALG_TYPE_BLKCIPHER)
                geniv = alg->cra_blkcipher.geniv;
        else
                geniv = alg->cra_ablkcipher.geniv;

        if (!geniv)
                geniv = crypto_default_geniv(alg);

        tmpl = crypto_lookup_template(geniv);
        err = -ENOENT;
        if (!tmpl)
                goto kill_larval;

        inst = tmpl->alloc(tb);
        err = PTR_ERR(inst);
        if (IS_ERR(inst))
                goto put_tmpl;

        err = crypto_register_instance(tmpl, inst);
        if (err) {
                tmpl->free(inst);
                goto put_tmpl;
        }

        /* Redo the lookup to use the instance we just registered. */
        err = -EAGAIN;

put_tmpl:
        crypto_tmpl_put(tmpl);
kill_larval:
        crypto_larval_kill(larval);
drop_larval:
        crypto_mod_put(larval);
out:
        crypto_mod_put(alg);
        return err;
}

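/*
 * Look up an skcipher by name.  A givcipher, or an algorithm that
 * takes no IV, can be used directly; anything else needs a default IV
 * generator instantiated around it first.
 */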
static struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type,
                                                 u32 mask)
{
        struct crypto_alg *alg;

        alg = crypto_alg_mod_lookup(name, type, mask);
        if (IS_ERR(alg))
                return alg;

        if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
            CRYPTO_ALG_TYPE_GIVCIPHER)
                return alg;

        if (!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
              CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
                                          alg->cra_ablkcipher.ivsize))
                return alg;

        crypto_mod_put(alg);
        alg = crypto_alg_mod_lookup(name, type | CRYPTO_ALG_TESTED,
                                    mask & ~CRYPTO_ALG_TESTED);
        if (IS_ERR(alg))
                return alg;

        if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
            CRYPTO_ALG_TYPE_GIVCIPHER) {
                if ((alg->cra_flags ^ type ^ ~mask) & CRYPTO_ALG_TESTED) {
                        crypto_mod_put(alg);
                        alg = ERR_PTR(-ENOENT);
                }
                return alg;
        }

        BUG_ON(!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
                 CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
                                             alg->cra_ablkcipher.ivsize));

        return ERR_PTR(crypto_givcipher_default(alg, type, mask));
}

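/*
 * Resolve @name to an skcipher algorithm and attach it to @spawn, so
 * that a template instance can create transforms of it later.
 */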
int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
                         u32 type, u32 mask)
{
        struct crypto_alg *alg;
        int err;

        type = crypto_skcipher_type(type);
        mask = crypto_skcipher_mask(mask);

        alg = crypto_lookup_skcipher(name, type, mask);
        if (IS_ERR(alg))
                return PTR_ERR(alg);

        err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
        crypto_mod_put(alg);
        return err;
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

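/*
 * Allocate an ablkcipher transform by algorithm name.  The lookup loops
 * on -EAGAIN, which is returned while a default geniv instance is being
 * constructed, and bails out early if a signal is pending.  Illustrative
 * use only:
 *
 *      struct crypto_ablkcipher *tfm;
 *
 *      tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *      if (IS_ERR(tfm))
 *              return PTR_ERR(tfm);
 */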
struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
                                                  u32 type, u32 mask)
{
        struct crypto_tfm *tfm;
        int err;

        type = crypto_skcipher_type(type);
        mask = crypto_skcipher_mask(mask);

        for (;;) {
                struct crypto_alg *alg;

                alg = crypto_lookup_skcipher(alg_name, type, mask);
                if (IS_ERR(alg)) {
                        err = PTR_ERR(alg);
                        goto err;
                }

                tfm = __crypto_alloc_tfm(alg, type, mask);
                if (!IS_ERR(tfm))
                        return __crypto_ablkcipher_cast(tfm);

                crypto_mod_put(alg);
                err = PTR_ERR(tfm);

err:
                if (err != -EAGAIN)
                        break;
                if (signal_pending(current)) {
                        err = -EINTR;
                        break;
                }
        }

        return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher);

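/*
 * chainiv serialises IV generation, which costs nothing on a
 * uniprocessor machine; with more than one CPU the default is the
 * parallel-friendly eseqiv.
 */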
static int __init skcipher_module_init(void)
{
        skcipher_default_geniv = num_possible_cpus() > 1 ?
                                 "eseqiv" : "chainiv";
        return 0;
}

static void skcipher_module_exit(void)
{
}

module_init(skcipher_module_init);
module_exit(skcipher_module_exit);