linux/crypto/ablkcipher.c
/*
 * Asynchronous block chaining cipher operations.
 *
 * This is the asynchronous version of blkcipher.c, indicating completion
 * via a callback.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <crypto/internal/skcipher.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>

#include <crypto/scatterwalk.h>

#include "internal.h"

static const char *skcipher_default_geniv __read_mostly;

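/*
 * One deferred copy-back: data that was produced in a bounce buffer on
 * the slow path, still to be written to its real destination in the
 * output scatterlist.  Queued on ablkcipher_walk.buffers and flushed by
 * __ablkcipher_walk_complete().
 */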
struct ablkcipher_buffer {
        struct list_head        entry;
        struct scatter_walk     dst;
        unsigned int            len;
        void                    *data;
};

enum {
        ABLKCIPHER_WALK_SLOW = 1 << 0,
};

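/* Copy one queued bounce buffer out to its real destination. */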
static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
{
        scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
}

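/*
 * Flush all pending bounce buffers to the output scatterlist and free
 * them.  Called once the cipher has finished processing the request.
 */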
void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
        struct ablkcipher_buffer *p, *tmp;

        list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
                ablkcipher_buffer_write(p);
                list_del(&p->entry);
                kfree(p);
        }
}
EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);

static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
                                          struct ablkcipher_buffer *p)
{
        p->dst = walk->out;
        list_add_tail(&p->entry, &walk->buffers);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
{
        u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

        return max(start, end_page);
}

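/*
 * Finish a slow-path step: the output went through a bounce buffer, so
 * only the output walk is advanced here; the copy-back itself stays
 * deferred on the buffer queue.
 */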
static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
                                                unsigned int bsize)
{
        unsigned int n = bsize;

        for (;;) {
                unsigned int len_this_page = scatterwalk_pagelen(&walk->out);

                if (len_this_page > n)
                        len_this_page = n;
                scatterwalk_advance(&walk->out, n);
                if (n == len_this_page)
                        break;
                n -= len_this_page;
                scatterwalk_start(&walk->out, scatterwalk_sg_next(walk->out.sg));
        }

        return bsize;
}

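/* Finish a fast-path step: data was processed in place, just advance. */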
static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
                                                unsigned int n)
{
        scatterwalk_advance(&walk->in, n);
        scatterwalk_advance(&walk->out, n);

        return n;
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
                                struct ablkcipher_walk *walk);

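/*
 * Complete the current walk step.  @err is the number of bytes the
 * cipher left unprocessed (or a negative error code).  If data remains,
 * this sets up the next step; on the final step any bounced IV is
 * copied back into the request and its buffer freed.
 */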
int ablkcipher_walk_done(struct ablkcipher_request *req,
                         struct ablkcipher_walk *walk, int err)
{
        struct crypto_tfm *tfm = req->base.tfm;
        unsigned int nbytes = 0;

        if (likely(err >= 0)) {
                unsigned int n = walk->nbytes - err;

                if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
                        n = ablkcipher_done_fast(walk, n);
                else if (WARN_ON(err)) {
                        err = -EINVAL;
                        goto err;
                } else
                        n = ablkcipher_done_slow(walk, n);

                nbytes = walk->total - n;
                err = 0;
        }

        scatterwalk_done(&walk->in, 0, nbytes);
        scatterwalk_done(&walk->out, 1, nbytes);

err:
        walk->total = nbytes;
        walk->nbytes = nbytes;

        if (nbytes) {
                crypto_yield(req->base.flags);
                return ablkcipher_walk_next(req, walk);
        }

        if (walk->iv != req->info)
                memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
        kfree(walk->iv_buffer);        /* kfree(NULL) is a no-op */

        return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);

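/*
 * Set up a slow-path step: the next block straddles scatterlist entries
 * or is misaligned, so bounce it through a freshly allocated buffer that
 * is aligned and does not cross a page boundary.
 */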
static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
                                       struct ablkcipher_walk *walk,
                                       unsigned int bsize,
                                       unsigned int alignmask,
                                       void **src_p, void **dst_p)
{
        unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
        struct ablkcipher_buffer *p;
        void *src, *dst, *base;
        unsigned int n;

        n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
        n += (aligned_bsize * 3 - (alignmask + 1) +
              (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));

        p = kmalloc(n, GFP_ATOMIC);
        if (!p)
                return ablkcipher_walk_done(req, walk, -ENOMEM);

        base = p + 1;

        dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
        src = dst = ablkcipher_get_spot(dst, bsize);

        p->len = bsize;
        p->data = dst;

        scatterwalk_copychunks(src, &walk->in, bsize, 0);

        ablkcipher_queue_write(walk, p);

        walk->nbytes = bsize;
        walk->flags |= ABLKCIPHER_WALK_SLOW;

        *src_p = src;
        *dst_p = dst;

        return 0;
}

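/*
 * The caller's IV is not aligned for this cipher: copy it into an
 * aligned buffer that lives for the duration of the walk.
 */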
static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
                                     struct crypto_tfm *tfm,
                                     unsigned int alignmask)
{
        unsigned bs = walk->blocksize;
        unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
        unsigned aligned_bs = ALIGN(bs, alignmask + 1);
        unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
                            (alignmask + 1);
        u8 *iv;

        size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
        walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
        if (!walk->iv_buffer)
                return -ENOMEM;

        iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
        iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
        iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
        iv = ablkcipher_get_spot(iv, ivsize);

        walk->iv = memcpy(iv, walk->iv, ivsize);
        return 0;
}

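/* Set up a fast-path step that works directly on the caller's pages. */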
static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
                                       struct ablkcipher_walk *walk)
{
        walk->src.page = scatterwalk_page(&walk->in);
        walk->src.offset = offset_in_page(walk->in.offset);
        walk->dst.page = scatterwalk_page(&walk->out);
        walk->dst.offset = offset_in_page(walk->out.offset);

        return 0;
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
                                struct ablkcipher_walk *walk)
{
        struct crypto_tfm *tfm = req->base.tfm;
        unsigned int alignmask, bsize, n;
        void *src, *dst;
        int err;

        alignmask = crypto_tfm_alg_alignmask(tfm);
        n = walk->total;
        if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
                req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
                return ablkcipher_walk_done(req, walk, -EINVAL);
        }

        walk->flags &= ~ABLKCIPHER_WALK_SLOW;
        src = dst = NULL;

        bsize = min(walk->blocksize, n);
        n = scatterwalk_clamp(&walk->in, n);
        n = scatterwalk_clamp(&walk->out, n);

        if (n < bsize ||
            !scatterwalk_aligned(&walk->in, alignmask) ||
            !scatterwalk_aligned(&walk->out, alignmask)) {
                err = ablkcipher_next_slow(req, walk, bsize, alignmask,
                                           &src, &dst);
                goto set_phys_lowmem;
        }

        walk->nbytes = n;

        return ablkcipher_next_fast(req, walk);

set_phys_lowmem:
        if (err >= 0) {
                walk->src.page = virt_to_page(src);
                walk->dst.page = virt_to_page(dst);
                walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
                walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
        }

        return err;
}

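/*
 * First step of a walk: record the IV (bouncing it if misaligned),
 * prime both scatterwalks and process the first chunk.
 */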
static int ablkcipher_walk_first(struct ablkcipher_request *req,
                                 struct ablkcipher_walk *walk)
{
        struct crypto_tfm *tfm = req->base.tfm;
        unsigned int alignmask;

        alignmask = crypto_tfm_alg_alignmask(tfm);
        if (WARN_ON_ONCE(in_irq()))
                return -EDEADLK;

        walk->nbytes = walk->total;
        if (unlikely(!walk->total))
                return 0;

        walk->iv_buffer = NULL;
        walk->iv = req->info;
        if (unlikely(((unsigned long)walk->iv & alignmask))) {
                int err = ablkcipher_copy_iv(walk, tfm, alignmask);

                if (err)
                        return err;
        }

        scatterwalk_start(&walk->in, walk->in.sg);
        scatterwalk_start(&walk->out, walk->out.sg);

        return ablkcipher_walk_next(req, walk);
}

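/*
 * Walk the request's scatterlists, presenting each contiguous chunk via
 * walk->src and walk->dst as page/offset pairs.
 */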
int ablkcipher_walk_phys(struct ablkcipher_request *req,
                         struct ablkcipher_walk *walk)
{
        walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
        return ablkcipher_walk_first(req, walk);
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);

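/*
 * The key is not aligned to the algorithm's alignmask: copy it into an
 * aligned buffer, call the real setkey and wipe the copy afterwards.
 */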
static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
        unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
        int ret;
        u8 *buffer, *alignbuffer;
        unsigned long absize;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_ATOMIC);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = cipher->setkey(tfm, alignbuffer, keylen);
        memset(alignbuffer, 0, keylen);
        kfree(buffer);
        return ret;
}

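/* Validate the key length and dispatch to the aligned or bounce path. */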
static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                  unsigned int keylen)
{
        struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
        unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);

        if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
                crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        if ((unsigned long)key & alignmask)
                return setkey_unaligned(tfm, key, keylen);

        return cipher->setkey(tfm, key, keylen);
}

static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
                                              u32 mask)
{
        return alg->cra_ctxsize;
}

int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req)
{
        return crypto_ablkcipher_encrypt(&req->creq);
}

int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req)
{
        return crypto_ablkcipher_decrypt(&req->creq);
}

static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
                                      u32 mask)
{
        struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
        struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

        if (alg->ivsize > PAGE_SIZE / 8)
                return -EINVAL;

        crt->setkey = setkey;
        crt->encrypt = alg->encrypt;
        crt->decrypt = alg->decrypt;
        if (!alg->ivsize) {
                crt->givencrypt = skcipher_null_givencrypt;
                crt->givdecrypt = skcipher_null_givdecrypt;
        }
        crt->base = __crypto_ablkcipher_cast(tfm);
        crt->ivsize = alg->ivsize;

        return 0;
}

static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
        __attribute__ ((unused));
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
        struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

        seq_printf(m, "type         : ablkcipher\n");
        seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                                             "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
        seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
        seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
        seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<default>");
}

const struct crypto_type crypto_ablkcipher_type = {
        .ctxsize = crypto_ablkcipher_ctxsize,
        .init = crypto_init_ablkcipher_ops,
#ifdef CONFIG_PROC_FS
        .show = crypto_ablkcipher_show,
#endif
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);

static int no_givdecrypt(struct skcipher_givcrypt_request *req)
{
        return -ENOSYS;
}

static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
                                     u32 mask)
{
        struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
        struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

        if (alg->ivsize > PAGE_SIZE / 8)
                return -EINVAL;

        crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ?
                      alg->setkey : setkey;
        crt->encrypt = alg->encrypt;
        crt->decrypt = alg->decrypt;
        crt->givencrypt = alg->givencrypt;
        crt->givdecrypt = alg->givdecrypt ?: no_givdecrypt;
        crt->base = __crypto_ablkcipher_cast(tfm);
        crt->ivsize = alg->ivsize;

        return 0;
}

static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
        __attribute__ ((unused));
static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
        struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

        seq_printf(m, "type         : givcipher\n");
        seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                                             "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
        seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
        seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
        seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<built-in>");
}

const struct crypto_type crypto_givcipher_type = {
        .ctxsize = crypto_ablkcipher_ctxsize,
        .init = crypto_init_givcipher_ops,
#ifdef CONFIG_PROC_FS
        .show = crypto_givcipher_show,
#endif
};
EXPORT_SYMBOL_GPL(crypto_givcipher_type);

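/*
 * Pick the default IV generator: chainiv whenever the IV size differs
 * from the block size, otherwise eseqiv for async ciphers and the
 * boot-time default (eseqiv on SMP, chainiv on UP) for sync ones.
 */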
const char *crypto_default_geniv(const struct crypto_alg *alg)
{
        if (((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
             CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
                                         alg->cra_ablkcipher.ivsize) !=
            alg->cra_blocksize)
                return "chainiv";

        return alg->cra_flags & CRYPTO_ALG_ASYNC ?
               "eseqiv" : skcipher_default_geniv;
}

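/*
 * Wrap @alg in its default IV generator template and register the
 * resulting instance.  Deliberately returns -EAGAIN on success so that
 * the caller redoes the lookup and picks up the new instance.
 */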
static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
{
        struct rtattr *tb[3];
        struct {
                struct rtattr attr;
                struct crypto_attr_type data;
        } ptype;
        struct {
                struct rtattr attr;
                struct crypto_attr_alg data;
        } palg;
        struct crypto_template *tmpl;
        struct crypto_instance *inst;
        struct crypto_alg *larval;
        const char *geniv;
        int err;

        larval = crypto_larval_lookup(alg->cra_driver_name,
                                      (type & ~CRYPTO_ALG_TYPE_MASK) |
                                      CRYPTO_ALG_TYPE_GIVCIPHER,
                                      mask | CRYPTO_ALG_TYPE_MASK);
        err = PTR_ERR(larval);
        if (IS_ERR(larval))
                goto out;

        err = -EAGAIN;
        if (!crypto_is_larval(larval))
                goto drop_larval;

        ptype.attr.rta_len = sizeof(ptype);
        ptype.attr.rta_type = CRYPTOA_TYPE;
        ptype.data.type = type | CRYPTO_ALG_GENIV;
        /* GENIV tells the template that we're making a default geniv. */
        ptype.data.mask = mask | CRYPTO_ALG_GENIV;
        tb[0] = &ptype.attr;

        palg.attr.rta_len = sizeof(palg);
        palg.attr.rta_type = CRYPTOA_ALG;
        /* Must use the exact name to locate ourselves. */
        memcpy(palg.data.name, alg->cra_driver_name, CRYPTO_MAX_ALG_NAME);
        tb[1] = &palg.attr;

        tb[2] = NULL;

        if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
            CRYPTO_ALG_TYPE_BLKCIPHER)
                geniv = alg->cra_blkcipher.geniv;
        else
                geniv = alg->cra_ablkcipher.geniv;

        if (!geniv)
                geniv = crypto_default_geniv(alg);

        tmpl = crypto_lookup_template(geniv);
        err = -ENOENT;
        if (!tmpl)
                goto kill_larval;

        inst = tmpl->alloc(tb);
        err = PTR_ERR(inst);
        if (IS_ERR(inst))
                goto put_tmpl;

        err = crypto_register_instance(tmpl, inst);
        if (err) {
                tmpl->free(inst);
                goto put_tmpl;
        }

        /* Redo the lookup to use the instance we just registered. */
        err = -EAGAIN;

put_tmpl:
        crypto_tmpl_put(tmpl);
kill_larval:
        crypto_larval_kill(larval);
drop_larval:
        crypto_mod_put(larval);
out:
        crypto_mod_put(alg);
        return err;
}

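/*
 * Look up a cipher suitable for skcipher use.  Givciphers and ciphers
 * that take no IV are returned as is; anything else gets a default IV
 * generator instantiated around it via crypto_givcipher_default().
 */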
static struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type,
                                                 u32 mask)
{
        struct crypto_alg *alg;

        alg = crypto_alg_mod_lookup(name, type, mask);
        if (IS_ERR(alg))
                return alg;

        if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
            CRYPTO_ALG_TYPE_GIVCIPHER)
                return alg;

        if (!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
              CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
                                          alg->cra_ablkcipher.ivsize))
                return alg;

        crypto_mod_put(alg);
        alg = crypto_alg_mod_lookup(name, type | CRYPTO_ALG_TESTED,
                                    mask & ~CRYPTO_ALG_TESTED);
        if (IS_ERR(alg))
                return alg;

        if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
            CRYPTO_ALG_TYPE_GIVCIPHER) {
                if ((alg->cra_flags ^ type ^ ~mask) & CRYPTO_ALG_TESTED) {
                        crypto_mod_put(alg);
                        alg = ERR_PTR(-ENOENT);
                }
                return alg;
        }

        BUG_ON(!((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
                 CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
                                             alg->cra_ablkcipher.ivsize));

        return ERR_PTR(crypto_givcipher_default(alg, type, mask));
}

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
                         u32 type, u32 mask)
{
        struct crypto_alg *alg;
        int err;

        type = crypto_skcipher_type(type);
        mask = crypto_skcipher_mask(mask);

        alg = crypto_lookup_skcipher(name, type, mask);
        if (IS_ERR(alg))
                return PTR_ERR(alg);

        err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
        crypto_mod_put(alg);
        return err;
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

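/*
 * Allocate an async block cipher handle, retrying while the default
 * geniv instance is being constructed (-EAGAIN).  A minimal usage
 * sketch, with error handling mostly elided; "cbc(aes)" is just an
 * example name and needs a registered implementation:
 *
 *      struct crypto_ablkcipher *tfm;
 *
 *      tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *      if (IS_ERR(tfm))
 *              return PTR_ERR(tfm);
 *      crypto_ablkcipher_setkey(tfm, key, keylen);
 *      ...
 *      crypto_free_ablkcipher(tfm);
 */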
struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
                                                  u32 type, u32 mask)
{
        struct crypto_tfm *tfm;
        int err;

        type = crypto_skcipher_type(type);
        mask = crypto_skcipher_mask(mask);

        for (;;) {
                struct crypto_alg *alg;

                alg = crypto_lookup_skcipher(alg_name, type, mask);
                if (IS_ERR(alg)) {
                        err = PTR_ERR(alg);
                        goto err;
                }

                tfm = __crypto_alloc_tfm(alg, type, mask);
                if (!IS_ERR(tfm))
                        return __crypto_ablkcipher_cast(tfm);

                crypto_mod_put(alg);
                err = PTR_ERR(tfm);

err:
                if (err != -EAGAIN)
                        break;
                if (signal_pending(current)) {
                        err = -EINTR;
                        break;
                }
        }

        return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher);

static int __init skcipher_module_init(void)
{
        skcipher_default_geniv = num_possible_cpus() > 1 ?
                                 "eseqiv" : "chainiv";
        return 0;
}

static void skcipher_module_exit(void)
{
}

module_init(skcipher_module_init);
module_exit(skcipher_module_exit);