linux/crypto/skcipher.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 */

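/*
 * A typical caller drives the walk as sketched below.  This is only the
 * common pattern, not code from this file; encrypt_blocks() is a
 * placeholder for the cipher's block-processing routine and bsize stands
 * for its block size:
 *
 *      err = skcipher_walk_virt(&walk, req, false);
 *      while (walk.nbytes) {
 *              unsigned int n = walk.nbytes - (walk.nbytes % bsize);
 *
 *              encrypt_blocks(walk.dst.virt.addr, walk.src.virt.addr,
 *                             n, walk.iv);
 *              err = skcipher_walk_done(&walk, walk.nbytes - n);
 *      }
 */
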
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
#include <net/netlink.h>

#include "internal.h"

enum {
        SKCIPHER_WALK_PHYS = 1 << 0,    /* hand out pages, not mappings */
        SKCIPHER_WALK_SLOW = 1 << 1,    /* chunk bounced through a buffer */
        SKCIPHER_WALK_COPY = 1 << 2,    /* chunk copied to fix alignment */
        SKCIPHER_WALK_DIFF = 1 << 3,    /* src and dst mapped separately */
        SKCIPHER_WALK_SLEEP = 1 << 4,   /* sleeping allocations allowed */
};

/* A write back to dst queued up while an asynchronous (phys) walk runs. */
struct skcipher_walk_buffer {
        struct list_head entry;
        struct scatter_walk dst;
        unsigned int len;
        u8 *data;
        u8 buffer[];
};

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
{
        if (PageHighMem(scatterwalk_page(walk)))
                kunmap_atomic(vaddr);
}

static inline void *skcipher_map(struct scatter_walk *walk)
{
        struct page *page = scatterwalk_page(walk);

        return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
               offset_in_page(walk->offset);
}

static inline void skcipher_map_src(struct skcipher_walk *walk)
{
        walk->src.virt.addr = skcipher_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
        walk->dst.virt.addr = skcipher_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
        skcipher_unmap(&walk->in, walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
        skcipher_unmap(&walk->out, walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
        return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 * If [start, start + len) would cross a page boundary, the spot returned is
 * the start of the page holding the last byte; otherwise it is start itself.
 */
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
        u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

        return max(start, end_page);
}

/* Flush the bounce buffer back out to dst (or, on a phys walk, just advance
 * past it; skcipher_walk_complete() does the deferred copies there).
 */
static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
        u8 *addr;

        addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
        addr = skcipher_get_spot(addr, bsize);
        scatterwalk_copychunks(addr, &walk->out, bsize,
                               (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
        return 0;
}

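/*
 * Finish the current step of the walk.  @err is either a negative error
 * from the caller or the number of bytes at the end of this step that the
 * caller did not process.  Flushes or unmaps the chunk as dictated by the
 * walk flags, advances the scatterlists, and sets up the next step via
 * skcipher_walk_next() if any data remains.
 */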
int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
        unsigned int n = walk->nbytes;
        unsigned int nbytes = 0;

        if (!n)
                goto finish;

        if (likely(err >= 0)) {
                n -= err;
                nbytes = walk->total - n;
        }

        if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
                                    SKCIPHER_WALK_SLOW |
                                    SKCIPHER_WALK_COPY |
                                    SKCIPHER_WALK_DIFF)))) {
unmap_src:
                skcipher_unmap_src(walk);
        } else if (walk->flags & SKCIPHER_WALK_DIFF) {
                skcipher_unmap_dst(walk);
                goto unmap_src;
        } else if (walk->flags & SKCIPHER_WALK_COPY) {
                skcipher_map_dst(walk);
                memcpy(walk->dst.virt.addr, walk->page, n);
                skcipher_unmap_dst(walk);
        } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
                if (err > 0) {
                        /*
                         * Didn't process all bytes.  Either the algorithm is
                         * broken, or this was the last step and it turned out
                         * the message wasn't evenly divisible into blocks but
                         * the algorithm requires it.
                         */
                        err = -EINVAL;
                        nbytes = 0;
                } else
                        n = skcipher_done_slow(walk, n);
        }

        if (err > 0)
                err = 0;

        walk->total = nbytes;
        walk->nbytes = 0;

        scatterwalk_advance(&walk->in, n);
        scatterwalk_advance(&walk->out, n);
        scatterwalk_done(&walk->in, 0, nbytes);
        scatterwalk_done(&walk->out, 1, nbytes);

        if (nbytes) {
                crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
                             CRYPTO_TFM_REQ_MAY_SLEEP : 0);
                return skcipher_walk_next(walk);
        }

finish:
        /* Short-circuit for the common/fast path. */
        if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
                goto out;

        if (walk->flags & SKCIPHER_WALK_PHYS)
                goto out;

        if (walk->iv != walk->oiv)
                memcpy(walk->oiv, walk->iv, walk->ivsize);
        if (walk->buffer != walk->page)
                kfree(walk->buffer);
        if (walk->page)
                free_page((unsigned long)walk->page);

out:
        return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);

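/*
 * Complete an asynchronous (phys) walk: flush all queued bounce buffers to
 * their destinations (unless the operation failed) and free the walk's
 * temporary storage.
 */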
void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
        struct skcipher_walk_buffer *p, *tmp;

        list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
                u8 *data;

                if (err)
                        goto done;

                data = p->data;
                if (!data) {
                        data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
                        data = skcipher_get_spot(data, walk->stride);
                }

                scatterwalk_copychunks(data, &p->dst, p->len, 1);

                if (offset_in_page(p->data) + p->len + walk->stride >
                    PAGE_SIZE)
                        free_page((unsigned long)p->data);

done:
                list_del(&p->entry);
                kfree(p);
        }

        if (!err && walk->iv != walk->oiv)
                memcpy(walk->oiv, walk->iv, walk->ivsize);
        if (walk->buffer != walk->page)
                kfree(walk->buffer);
        if (walk->page)
                free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);

static void skcipher_queue_write(struct skcipher_walk *walk,
                                 struct skcipher_walk_buffer *p)
{
        p->dst = walk->out;
        list_add_tail(&p->entry, &walk->buffers);
}

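/*
 * Slow path: the chunk cannot be handled in place (e.g. it spans a page or
 * is smaller than a full block), so process it through an aligned bounce
 * buffer that is copied back out in skcipher_done_slow() or
 * skcipher_walk_complete().
 */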
static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
        bool phys = walk->flags & SKCIPHER_WALK_PHYS;
        unsigned alignmask = walk->alignmask;
        struct skcipher_walk_buffer *p;
        unsigned a;
        unsigned n;
        u8 *buffer;
        void *v;

        if (!phys) {
                if (!walk->buffer)
                        walk->buffer = walk->page;
                buffer = walk->buffer;
                if (buffer)
                        goto ok;
        }

        /* Start with the minimum alignment of kmalloc. */
        a = crypto_tfm_ctx_alignment() - 1;
        n = bsize;

        if (phys) {
                /*
                 * Calculate the minimum alignment of p->buffer: the
                 * expression below evaluates to the largest power of two
                 * dividing sizeof(*p), minus one, i.e. the alignment the
                 * trailing buffer[] is guaranteed to retain after the
                 * header.
                 */
                a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
                n += sizeof(*p);
        }

        /* Minimum size to align p->buffer by alignmask. */
        n += alignmask & ~a;

        /* Minimum size to ensure p->buffer does not straddle a page. */
        n += (bsize - 1) & ~(alignmask | a);

        v = kzalloc(n, skcipher_walk_gfp(walk));
        if (!v)
                return skcipher_walk_done(walk, -ENOMEM);

        if (phys) {
                p = v;
                p->len = bsize;
                skcipher_queue_write(walk, p);
                buffer = p->buffer;
        } else {
                walk->buffer = v;
                buffer = v;
        }

ok:
        walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
        walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
        walk->src.virt.addr = walk->dst.virt.addr;

        scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

        walk->nbytes = bsize;
        walk->flags |= SKCIPHER_WALK_SLOW;

        return 0;
}

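/*
 * Copy path: src and dst are usable but misaligned, so stage the chunk
 * through the preallocated walk->page; skcipher_walk_done() (or a queued
 * write on a phys walk) copies the result out afterwards.
 */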
static int skcipher_next_copy(struct skcipher_walk *walk)
{
        struct skcipher_walk_buffer *p;
        u8 *tmp = walk->page;

        skcipher_map_src(walk);
        memcpy(tmp, walk->src.virt.addr, walk->nbytes);
        skcipher_unmap_src(walk);

        walk->src.virt.addr = tmp;
        walk->dst.virt.addr = tmp;

        if (!(walk->flags & SKCIPHER_WALK_PHYS))
                return 0;

        p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
        if (!p)
                return -ENOMEM;

        p->data = walk->page;
        p->len = walk->nbytes;
        skcipher_queue_write(walk, p);

        if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
            PAGE_SIZE)
                walk->page = NULL;
        else
                walk->page += walk->nbytes;

        return 0;
}

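/*
 * Fast path: use the scatterlist entries directly.  Note that the diff
 * computation below reads src.virt.page and dst.virt.page right after the
 * phys.page fields were written; this appears to rely on the two members
 * aliasing each other in the walk's src/dst union, so a non-zero diff
 * means the operation is not in place.
 */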
static int skcipher_next_fast(struct skcipher_walk *walk)
{
        unsigned long diff;

        walk->src.phys.page = scatterwalk_page(&walk->in);
        walk->src.phys.offset = offset_in_page(walk->in.offset);
        walk->dst.phys.page = scatterwalk_page(&walk->out);
        walk->dst.phys.offset = offset_in_page(walk->out.offset);

        if (walk->flags & SKCIPHER_WALK_PHYS)
                return 0;

        diff = walk->src.phys.offset - walk->dst.phys.offset;
        diff |= walk->src.virt.page - walk->dst.virt.page;

        skcipher_map_src(walk);
        walk->dst.virt.addr = walk->src.virt.addr;

        if (diff) {
                walk->flags |= SKCIPHER_WALK_DIFF;
                skcipher_map_dst(walk);
        }

        return 0;
}

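/*
 * Set up the next step of the walk, picking the fast, copy or slow path
 * depending on how much contiguous data the scatterlists expose and on the
 * alignment requirements.
 */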
static int skcipher_walk_next(struct skcipher_walk *walk)
{
        unsigned int bsize;
        unsigned int n;
        int err;

        walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
                         SKCIPHER_WALK_DIFF);

        n = walk->total;
        bsize = min(walk->stride, max(n, walk->blocksize));
        n = scatterwalk_clamp(&walk->in, n);
        n = scatterwalk_clamp(&walk->out, n);

        if (unlikely(n < bsize)) {
                if (unlikely(walk->total < walk->blocksize))
                        return skcipher_walk_done(walk, -EINVAL);

slow_path:
                err = skcipher_next_slow(walk, bsize);
                goto set_phys_lowmem;
        }

        if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
                if (!walk->page) {
                        gfp_t gfp = skcipher_walk_gfp(walk);

                        walk->page = (void *)__get_free_page(gfp);
                        if (!walk->page)
                                goto slow_path;
                }

                walk->nbytes = min_t(unsigned, n,
                                     PAGE_SIZE - offset_in_page(walk->page));
                walk->flags |= SKCIPHER_WALK_COPY;
                err = skcipher_next_copy(walk);
                goto set_phys_lowmem;
        }

        walk->nbytes = n;

        return skcipher_next_fast(walk);

set_phys_lowmem:
        if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
                walk->src.phys.page = virt_to_page(walk->src.virt.addr);
                walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
                walk->src.phys.offset &= PAGE_SIZE - 1;
                walk->dst.phys.offset &= PAGE_SIZE - 1;
        }
        return err;
}

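/*
 * The IV the caller passed in is not sufficiently aligned, so bounce it
 * into an aligned buffer; the result is copied back to the original IV
 * when the walk finishes.
 */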
static int skcipher_copy_iv(struct skcipher_walk *walk)
{
        unsigned a = crypto_tfm_ctx_alignment() - 1;
        unsigned alignmask = walk->alignmask;
        unsigned ivsize = walk->ivsize;
        unsigned bs = walk->stride;
        unsigned aligned_bs;
        unsigned size;
        u8 *iv;

        aligned_bs = ALIGN(bs, alignmask + 1);

        /* Minimum size to align buffer by alignmask. */
        size = alignmask & ~a;

        if (walk->flags & SKCIPHER_WALK_PHYS)
                size += ivsize;
        else {
                size += aligned_bs + ivsize;

                /* Minimum size to ensure buffer does not straddle a page. */
                size += (bs - 1) & ~(alignmask | a);
        }

        walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
        if (!walk->buffer)
                return -ENOMEM;

        iv = PTR_ALIGN(walk->buffer, alignmask + 1);
        iv = skcipher_get_spot(iv, bs) + aligned_bs;

        walk->iv = memcpy(iv, walk->iv, walk->ivsize);
        return 0;
}

static int skcipher_walk_first(struct skcipher_walk *walk)
{
        if (WARN_ON_ONCE(in_irq()))
                return -EDEADLK;

        walk->buffer = NULL;
        if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
                int err = skcipher_copy_iv(walk);

                if (err)
                        return err;
        }

        walk->page = NULL;

        return skcipher_walk_next(walk);
}

static int skcipher_walk_skcipher(struct skcipher_walk *walk,
                                  struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

        walk->total = req->cryptlen;
        walk->nbytes = 0;
        walk->iv = req->iv;
        walk->oiv = req->iv;

        if (unlikely(!walk->total))
                return 0;

        scatterwalk_start(&walk->in, req->src);
        scatterwalk_start(&walk->out, req->dst);

        walk->flags &= ~SKCIPHER_WALK_SLEEP;
        walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                       SKCIPHER_WALK_SLEEP : 0;

        walk->blocksize = crypto_skcipher_blocksize(tfm);
        walk->stride = crypto_skcipher_walksize(tfm);
        walk->ivsize = crypto_skcipher_ivsize(tfm);
        walk->alignmask = crypto_skcipher_alignmask(tfm);

        return skcipher_walk_first(walk);
}

int skcipher_walk_virt(struct skcipher_walk *walk,
                       struct skcipher_request *req, bool atomic)
{
        int err;

        might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);

        walk->flags &= ~SKCIPHER_WALK_PHYS;

        err = skcipher_walk_skcipher(walk, req);

        walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

        return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);

void skcipher_walk_atomise(struct skcipher_walk *walk)
{
        walk->flags &= ~SKCIPHER_WALK_SLEEP;
}
EXPORT_SYMBOL_GPL(skcipher_walk_atomise);

int skcipher_walk_async(struct skcipher_walk *walk,
                        struct skcipher_request *req)
{
        walk->flags |= SKCIPHER_WALK_PHYS;

        INIT_LIST_HEAD(&walk->buffers);

        return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);

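/*
 * Like skcipher_walk_skcipher() but for AEAD requests: the associated data
 * is skipped in both src and dst (scatterwalk_copychunks() with a NULL
 * buffer only advances the walk) so that only the ciphertext is walked.
 */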
static int skcipher_walk_aead_common(struct skcipher_walk *walk,
                                     struct aead_request *req, bool atomic)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        int err;

        walk->nbytes = 0;
        walk->iv = req->iv;
        walk->oiv = req->iv;

        if (unlikely(!walk->total))
                return 0;

        walk->flags &= ~SKCIPHER_WALK_PHYS;

        scatterwalk_start(&walk->in, req->src);
        scatterwalk_start(&walk->out, req->dst);

        scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
        scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

        scatterwalk_done(&walk->in, 0, walk->total);
        scatterwalk_done(&walk->out, 0, walk->total);

        if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
                walk->flags |= SKCIPHER_WALK_SLEEP;
        else
                walk->flags &= ~SKCIPHER_WALK_SLEEP;

        walk->blocksize = crypto_aead_blocksize(tfm);
        walk->stride = crypto_aead_chunksize(tfm);
        walk->ivsize = crypto_aead_ivsize(tfm);
        walk->alignmask = crypto_aead_alignmask(tfm);

        err = skcipher_walk_first(walk);

        if (atomic)
                walk->flags &= ~SKCIPHER_WALK_SLEEP;

        return err;
}

int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
                       bool atomic)
{
        walk->total = req->cryptlen;

        return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead);

int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
                               struct aead_request *req, bool atomic)
{
        walk->total = req->cryptlen;

        return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
                               struct aead_request *req, bool atomic)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);

        walk->total = req->cryptlen - crypto_aead_authsize(tfm);

        return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);

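/*
 * Compatibility glue: algorithms still registered through the legacy
 * blkcipher and ablkcipher interfaces are wrapped so that they can be
 * driven through the skcipher API.  The skcipher tfm context then holds a
 * single pointer to the legacy tfm.
 */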
static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
        if (alg->cra_type == &crypto_blkcipher_type)
                return sizeof(struct crypto_blkcipher *);

        if (alg->cra_type == &crypto_ablkcipher_type)
                return sizeof(struct crypto_ablkcipher *);

        return crypto_alg_extsize(alg);
}

static void skcipher_set_needkey(struct crypto_skcipher *tfm)
{
        if (tfm->keysize)
                crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
                                     const u8 *key, unsigned int keylen)
{
        struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
        struct crypto_blkcipher *blkcipher = *ctx;
        int err;

        crypto_blkcipher_clear_flags(blkcipher, ~0);
        crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) &
                                              CRYPTO_TFM_REQ_MASK);
        err = crypto_blkcipher_setkey(blkcipher, key, keylen);
        crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
                                       CRYPTO_TFM_RES_MASK);
        if (unlikely(err)) {
                skcipher_set_needkey(tfm);
                return err;
        }

        crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
        return 0;
}

static int skcipher_crypt_blkcipher(struct skcipher_request *req,
                                    int (*crypt)(struct blkcipher_desc *,
                                                 struct scatterlist *,
                                                 struct scatterlist *,
                                                 unsigned int))
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
        struct blkcipher_desc desc = {
                .tfm = *ctx,
                .info = req->iv,
                .flags = req->base.flags,
        };

        return crypt(&desc, req->dst, req->src, req->cryptlen);
}

static int skcipher_encrypt_blkcipher(struct skcipher_request *req)
{
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

        return skcipher_crypt_blkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_blkcipher(struct skcipher_request *req)
{
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

        return skcipher_crypt_blkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
        struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);

        crypto_free_blkcipher(*ctx);
}

static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
        struct crypto_alg *calg = tfm->__crt_alg;
        struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
        struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
        struct crypto_blkcipher *blkcipher;
        struct crypto_tfm *btfm;

        if (!crypto_mod_get(calg))
                return -EAGAIN;

        btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER,
                                  CRYPTO_ALG_TYPE_MASK);
        if (IS_ERR(btfm)) {
                crypto_mod_put(calg);
                return PTR_ERR(btfm);
        }

        blkcipher = __crypto_blkcipher_cast(btfm);
        *ctx = blkcipher;
        tfm->exit = crypto_exit_skcipher_ops_blkcipher;

        skcipher->setkey = skcipher_setkey_blkcipher;
        skcipher->encrypt = skcipher_encrypt_blkcipher;
        skcipher->decrypt = skcipher_decrypt_blkcipher;

        skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
        skcipher->keysize = calg->cra_blkcipher.max_keysize;

        skcipher_set_needkey(skcipher);

        return 0;
}

static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
                                      const u8 *key, unsigned int keylen)
{
        struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
        struct crypto_ablkcipher *ablkcipher = *ctx;
        int err;

        crypto_ablkcipher_clear_flags(ablkcipher, ~0);
        crypto_ablkcipher_set_flags(ablkcipher,
                                    crypto_skcipher_get_flags(tfm) &
                                    CRYPTO_TFM_REQ_MASK);
        err = crypto_ablkcipher_setkey(ablkcipher, key, keylen);
        crypto_skcipher_set_flags(tfm,
                                  crypto_ablkcipher_get_flags(ablkcipher) &
                                  CRYPTO_TFM_RES_MASK);
        if (unlikely(err)) {
                skcipher_set_needkey(tfm);
                return err;
        }

        crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
        return 0;
}

static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
                                     int (*crypt)(struct ablkcipher_request *))
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
        struct ablkcipher_request *subreq = skcipher_request_ctx(req);

        ablkcipher_request_set_tfm(subreq, *ctx);
        ablkcipher_request_set_callback(subreq, skcipher_request_flags(req),
                                        req->base.complete, req->base.data);
        ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
                                     req->iv);

        return crypt(subreq);
}

static int skcipher_encrypt_ablkcipher(struct skcipher_request *req)
{
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
        struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

        return skcipher_crypt_ablkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_ablkcipher(struct skcipher_request *req)
{
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
        struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

        return skcipher_crypt_ablkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
        struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);

        crypto_free_ablkcipher(*ctx);
}

static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
        struct crypto_alg *calg = tfm->__crt_alg;
        struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
        struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
        struct crypto_ablkcipher *ablkcipher;
        struct crypto_tfm *abtfm;

        if (!crypto_mod_get(calg))
                return -EAGAIN;

        abtfm = __crypto_alloc_tfm(calg, 0, 0);
        if (IS_ERR(abtfm)) {
                crypto_mod_put(calg);
                return PTR_ERR(abtfm);
        }

        ablkcipher = __crypto_ablkcipher_cast(abtfm);
        *ctx = ablkcipher;
        tfm->exit = crypto_exit_skcipher_ops_ablkcipher;

        skcipher->setkey = skcipher_setkey_ablkcipher;
        skcipher->encrypt = skcipher_encrypt_ablkcipher;
        skcipher->decrypt = skcipher_decrypt_ablkcipher;

        skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
        skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
                            sizeof(struct ablkcipher_request);
        skcipher->keysize = calg->cra_ablkcipher.max_keysize;

        skcipher_set_needkey(skcipher);

        return 0;
}

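/* Bounce an unaligned key through a kmalloc'd aligned buffer. */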
static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
                                     const u8 *key, unsigned int keylen)
{
        unsigned long alignmask = crypto_skcipher_alignmask(tfm);
        struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
        u8 *buffer, *alignbuffer;
        unsigned long absize;
        int ret;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_ATOMIC);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = cipher->setkey(tfm, alignbuffer, keylen);
        kzfree(buffer);
        return ret;
}

static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
                           unsigned int keylen)
{
        struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
        unsigned long alignmask = crypto_skcipher_alignmask(tfm);
        int err;

        if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
                crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        if ((unsigned long)key & alignmask)
                err = skcipher_setkey_unaligned(tfm, key, keylen);
        else
                err = cipher->setkey(tfm, key, keylen);

        if (unlikely(err)) {
                skcipher_set_needkey(tfm);
                return err;
        }

        crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
        return 0;
}

int crypto_skcipher_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_alg *alg = tfm->base.__crt_alg;
        unsigned int cryptlen = req->cryptlen;
        int ret;

        crypto_stats_get(alg);
        if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
                ret = -ENOKEY;
        else
                ret = tfm->encrypt(req);
        crypto_stats_skcipher_encrypt(cryptlen, ret, alg);
        return ret;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);

int crypto_skcipher_decrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_alg *alg = tfm->base.__crt_alg;
        unsigned int cryptlen = req->cryptlen;
        int ret;

        crypto_stats_get(alg);
        if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
                ret = -ENOKEY;
        else
                ret = tfm->decrypt(req);
        crypto_stats_skcipher_decrypt(cryptlen, ret, alg);
        return ret;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);

static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
        struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
        struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

        alg->exit(skcipher);
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
        struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

        if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
                return crypto_init_skcipher_ops_blkcipher(tfm);

        if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type)
                return crypto_init_skcipher_ops_ablkcipher(tfm);

        skcipher->setkey = skcipher_setkey;
        skcipher->encrypt = alg->encrypt;
        skcipher->decrypt = alg->decrypt;
        skcipher->ivsize = alg->ivsize;
        skcipher->keysize = alg->max_keysize;

        skcipher_set_needkey(skcipher);

        if (alg->exit)
                skcipher->base.exit = crypto_skcipher_exit_tfm;

        if (alg->init)
                return alg->init(skcipher);

        return 0;
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
        struct skcipher_instance *skcipher =
                container_of(inst, struct skcipher_instance, s.base);

        skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
        __maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
        struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
                                                     base);

        seq_printf(m, "type         : skcipher\n");
        seq_printf(m, "async        : %s\n",
                   alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
        seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
        seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
        seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
        seq_printf(m, "walksize     : %u\n", skcipher->walksize);
}

#ifdef CONFIG_NET
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_blkcipher rblkcipher;
        struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
                                                     base);

        memset(&rblkcipher, 0, sizeof(rblkcipher));

        strscpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
        strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

        rblkcipher.blocksize = alg->cra_blocksize;
        rblkcipher.min_keysize = skcipher->min_keysize;
        rblkcipher.max_keysize = skcipher->max_keysize;
        rblkcipher.ivsize = skcipher->ivsize;

        return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
                       sizeof(rblkcipher), &rblkcipher);
}
#else
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif

static const struct crypto_type crypto_skcipher_type2 = {
        .extsize = crypto_skcipher_extsize,
        .init_tfm = crypto_skcipher_init_tfm,
        .free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
        .show = crypto_skcipher_show,
#endif
        .report = crypto_skcipher_report,
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
        .type = CRYPTO_ALG_TYPE_SKCIPHER,
        .tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
                         const char *name, u32 type, u32 mask)
{
        spawn->base.frontend = &crypto_skcipher_type2;
        return crypto_grab_spawn(&spawn->base, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
                                              u32 type, u32 mask)
{
        return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);

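/*
 * Allocate a tfm that is guaranteed to be synchronous, so that it can be
 * paired with an on-stack request, e.g. via
 * SYNC_SKCIPHER_REQUEST_ON_STACK(); the bounded request size checked
 * below is what makes that safe.
 */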
struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
                                const char *alg_name, u32 type, u32 mask)
{
        struct crypto_skcipher *tfm;

        /* Only sync algorithms allowed. */
        mask |= CRYPTO_ALG_ASYNC;

        tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);

        /*
         * Make sure we do not allocate something that might get used with
         * an on-stack request: check the request size.
         */
        if (!IS_ERR(tfm) && WARN_ON(crypto_skcipher_reqsize(tfm) >
                                    MAX_SYNC_SKCIPHER_REQSIZE)) {
                crypto_free_skcipher(tfm);
                return ERR_PTR(-EINVAL);
        }

        return (struct crypto_sync_skcipher *)tfm;
}
EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);

int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask)
{
        return crypto_type_has_alg(alg_name, &crypto_skcipher_type2,
                                   type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher2);

static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
        struct crypto_alg *base = &alg->base;

        if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
            alg->walksize > PAGE_SIZE / 8)
                return -EINVAL;

        if (!alg->chunksize)
                alg->chunksize = base->cra_blocksize;
        if (!alg->walksize)
                alg->walksize = alg->chunksize;

        base->cra_type = &crypto_skcipher_type2;
        base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
        base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

        return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
        struct crypto_alg *base = &alg->base;
        int err;

        err = skcipher_prepare_alg(alg);
        if (err)
                return err;

        return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
        crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
        int i, ret;

        for (i = 0; i < count; i++) {
                ret = crypto_register_skcipher(&algs[i]);
                if (ret)
                        goto err;
        }

        return 0;

err:
        for (--i; i >= 0; --i)
                crypto_unregister_skcipher(&algs[i]);

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
        int i;

        for (i = count - 1; i >= 0; --i)
                crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
                               struct skcipher_instance *inst)
{
        int err;

        err = skcipher_prepare_alg(&inst->alg);
        if (err)
                return err;

        return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

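/*
 * Default helpers for "simple" templates, i.e. those that wrap a single
 * block cipher in a mode of operation such as cbc or ecb; see
 * skcipher_alloc_instance_simple() below.
 */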
static int skcipher_setkey_simple(struct crypto_skcipher *tfm, const u8 *key,
                                  unsigned int keylen)
{
        struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
        int err;

        crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
        crypto_cipher_set_flags(cipher, crypto_skcipher_get_flags(tfm) &
                                CRYPTO_TFM_REQ_MASK);
        err = crypto_cipher_setkey(cipher, key, keylen);
        crypto_skcipher_set_flags(tfm, crypto_cipher_get_flags(cipher) &
                                  CRYPTO_TFM_RES_MASK);
        return err;
}

static int skcipher_init_tfm_simple(struct crypto_skcipher *tfm)
{
        struct skcipher_instance *inst = skcipher_alg_instance(tfm);
        struct crypto_spawn *spawn = skcipher_instance_ctx(inst);
        struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_cipher *cipher;

        cipher = crypto_spawn_cipher(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->cipher = cipher;
        return 0;
}

static void skcipher_exit_tfm_simple(struct crypto_skcipher *tfm)
{
        struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);

        crypto_free_cipher(ctx->cipher);
}

static void skcipher_free_instance_simple(struct skcipher_instance *inst)
{
        crypto_drop_spawn(skcipher_instance_ctx(inst));
        kfree(inst);
}

/**
 * skcipher_alloc_instance_simple - allocate instance of simple block cipher mode
 *
 * Allocate an skcipher_instance for a simple block cipher mode of operation,
 * e.g. cbc or ecb.  The instance context will have just a single crypto_spawn,
 * that for the underlying cipher.  The {min,max}_keysize, ivsize, blocksize,
 * alignmask, and priority are set from the underlying cipher but can be
 * overridden if needed.  The tfm context defaults to skcipher_ctx_simple, and
 * default ->setkey(), ->init(), and ->exit() methods are installed.
 *
 * @tmpl: the template being instantiated
 * @tb: the template parameters
 * @cipher_alg_ret: on success, a pointer to the underlying cipher algorithm is
 *                  returned here.  It must be dropped with crypto_mod_put().
 *
 * Return: a pointer to the new instance, or an ERR_PTR().  The caller still
 *         needs to register the instance.
 */
struct skcipher_instance *
skcipher_alloc_instance_simple(struct crypto_template *tmpl, struct rtattr **tb,
                               struct crypto_alg **cipher_alg_ret)
{
        struct crypto_attr_type *algt;
        struct crypto_alg *cipher_alg;
        struct skcipher_instance *inst;
        struct crypto_spawn *spawn;
        u32 mask;
        int err;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return ERR_CAST(algt);

        if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
                return ERR_PTR(-EINVAL);

        mask = CRYPTO_ALG_TYPE_MASK |
               crypto_requires_off(algt->type, algt->mask,
                                   CRYPTO_ALG_NEED_FALLBACK);

        cipher_alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask);
        if (IS_ERR(cipher_alg))
                return ERR_CAST(cipher_alg);

        inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
        if (!inst) {
                err = -ENOMEM;
                goto err_put_cipher_alg;
        }
        spawn = skcipher_instance_ctx(inst);

        err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name,
                                  cipher_alg);
        if (err)
                goto err_free_inst;

        err = crypto_init_spawn(spawn, cipher_alg,
                                skcipher_crypto_instance(inst),
                                CRYPTO_ALG_TYPE_MASK);
        if (err)
                goto err_free_inst;
        inst->free = skcipher_free_instance_simple;

        /* Default algorithm properties, can be overridden */
        inst->alg.base.cra_blocksize = cipher_alg->cra_blocksize;
        inst->alg.base.cra_alignmask = cipher_alg->cra_alignmask;
        inst->alg.base.cra_priority = cipher_alg->cra_priority;
        inst->alg.min_keysize = cipher_alg->cra_cipher.cia_min_keysize;
        inst->alg.max_keysize = cipher_alg->cra_cipher.cia_max_keysize;
        inst->alg.ivsize = cipher_alg->cra_blocksize;

        /* Use skcipher_ctx_simple by default, can be overridden */
        inst->alg.base.cra_ctxsize = sizeof(struct skcipher_ctx_simple);
        inst->alg.setkey = skcipher_setkey_simple;
        inst->alg.init = skcipher_init_tfm_simple;
        inst->alg.exit = skcipher_exit_tfm_simple;

        *cipher_alg_ret = cipher_alg;
        return inst;

err_free_inst:
        kfree(inst);
err_put_cipher_alg:
        crypto_mod_put(cipher_alg);
        return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");