linux/crypto/skcipher.c
/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
#include <net/netlink.h>

#include "internal.h"

enum {
        SKCIPHER_WALK_PHYS = 1 << 0,
        SKCIPHER_WALK_SLOW = 1 << 1,
        SKCIPHER_WALK_COPY = 1 << 2,
        SKCIPHER_WALK_DIFF = 1 << 3,
        SKCIPHER_WALK_SLEEP = 1 << 4,
};

struct skcipher_walk_buffer {
        struct list_head entry;
        struct scatter_walk dst;
        unsigned int len;
        u8 *data;
        u8 buffer[];
};

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
{
        if (PageHighMem(scatterwalk_page(walk)))
                kunmap_atomic(vaddr);
}

static inline void *skcipher_map(struct scatter_walk *walk)
{
        struct page *page = scatterwalk_page(walk);

        return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
               offset_in_page(walk->offset);
}

static inline void skcipher_map_src(struct skcipher_walk *walk)
{
        walk->src.virt.addr = skcipher_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
        walk->dst.virt.addr = skcipher_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
        skcipher_unmap(&walk->in, walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
        skcipher_unmap(&walk->out, walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
        return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
        u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

        return max(start, end_page);
}
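
/*
 * Worked example (added for clarity; the numbers are illustrative and
 * assume PAGE_SIZE == 4096): for start == 0x12ff8 and len == 16, the
 * last byte would land at 0x13007, so end_page == 0x13000 and max()
 * moves the spot up to the start of the next page, where the caller
 * must have left at least len bytes.  For start == 0x12f00 and
 * len == 16 the chunk already fits in one page, end_page == 0x12000
 * is below start, and start is returned unchanged.
 */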

static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
        u8 *addr;

        addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
        addr = skcipher_get_spot(addr, bsize);
        scatterwalk_copychunks(addr, &walk->out, bsize,
                               (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
}

int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
        unsigned int n; /* bytes processed */
        bool more;

        if (unlikely(err < 0))
                goto finish;

        n = walk->nbytes - err;
        walk->total -= n;
        more = (walk->total != 0);

        if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
                                    SKCIPHER_WALK_SLOW |
                                    SKCIPHER_WALK_COPY |
                                    SKCIPHER_WALK_DIFF)))) {
unmap_src:
                skcipher_unmap_src(walk);
        } else if (walk->flags & SKCIPHER_WALK_DIFF) {
                skcipher_unmap_dst(walk);
                goto unmap_src;
        } else if (walk->flags & SKCIPHER_WALK_COPY) {
                skcipher_map_dst(walk);
                memcpy(walk->dst.virt.addr, walk->page, n);
                skcipher_unmap_dst(walk);
        } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
                if (WARN_ON(err)) {
                        /* unexpected case; didn't process all bytes */
                        err = -EINVAL;
                        goto finish;
                }
                skcipher_done_slow(walk, n);
                goto already_advanced;
        }

        scatterwalk_advance(&walk->in, n);
        scatterwalk_advance(&walk->out, n);
already_advanced:
        scatterwalk_done(&walk->in, 0, more);
        scatterwalk_done(&walk->out, 1, more);

        if (more) {
                crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
                             CRYPTO_TFM_REQ_MAY_SLEEP : 0);
                return skcipher_walk_next(walk);
        }
        err = 0;
finish:
        walk->nbytes = 0;

        /* Short-circuit for the common/fast path. */
        if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
                goto out;

        if (walk->flags & SKCIPHER_WALK_PHYS)
                goto out;

        if (walk->iv != walk->oiv)
                memcpy(walk->oiv, walk->iv, walk->ivsize);
        if (walk->buffer != walk->page)
                kfree(walk->buffer);
        if (walk->page)
                free_page((unsigned long)walk->page);

out:
        return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);

void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
        struct skcipher_walk_buffer *p, *tmp;

        list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
                u8 *data;

                if (err)
                        goto done;

                data = p->data;
                if (!data) {
                        data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
                        data = skcipher_get_spot(data, walk->stride);
                }

                scatterwalk_copychunks(data, &p->dst, p->len, 1);

                if (offset_in_page(p->data) + p->len + walk->stride >
                    PAGE_SIZE)
                        free_page((unsigned long)p->data);

done:
                list_del(&p->entry);
                kfree(p);
        }

        if (!err && walk->iv != walk->oiv)
                memcpy(walk->oiv, walk->iv, walk->ivsize);
        if (walk->buffer != walk->page)
                kfree(walk->buffer);
        if (walk->page)
                free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);

static void skcipher_queue_write(struct skcipher_walk *walk,
                                 struct skcipher_walk_buffer *p)
{
        p->dst = walk->out;
        list_add_tail(&p->entry, &walk->buffers);
}

static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
        bool phys = walk->flags & SKCIPHER_WALK_PHYS;
        unsigned alignmask = walk->alignmask;
        struct skcipher_walk_buffer *p;
        unsigned a;
        unsigned n;
        u8 *buffer;
        void *v;

        if (!phys) {
                if (!walk->buffer)
                        walk->buffer = walk->page;
                buffer = walk->buffer;
                if (buffer)
                        goto ok;
        }

        /* Start with the minimum alignment of kmalloc. */
        a = crypto_tfm_ctx_alignment() - 1;
        n = bsize;

        if (phys) {
                /* Calculate the minimum alignment of p->buffer. */
                a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
                n += sizeof(*p);
        }

        /* Minimum size to align p->buffer by alignmask. */
        n += alignmask & ~a;

        /* Minimum size to ensure p->buffer does not straddle a page. */
        n += (bsize - 1) & ~(alignmask | a);

        v = kzalloc(n, skcipher_walk_gfp(walk));
        if (!v)
                return skcipher_walk_done(walk, -ENOMEM);

        if (phys) {
                p = v;
                p->len = bsize;
                skcipher_queue_write(walk, p);
                buffer = p->buffer;
        } else {
                walk->buffer = v;
                buffer = v;
        }

ok:
        walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
        walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
        walk->src.virt.addr = walk->dst.virt.addr;

        scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

        walk->nbytes = bsize;
        walk->flags |= SKCIPHER_WALK_SLOW;

        return 0;
}
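
/*
 * Sizing sketch for the slow path above (illustrative numbers, not
 * taken from the code): in the !phys case with bsize == 16,
 * alignmask == 15 and a kmalloc alignment of a + 1 == 8, n starts at
 * 16, gains (alignmask & ~a) == 8 bytes of alignment slack and
 * ((bsize - 1) & ~(alignmask | a)) == 0 bytes of page-straddle
 * reserve, so kzalloc() is asked for 24 bytes and the pointer produced
 * by PTR_ALIGN() plus skcipher_get_spot() has bsize usable bytes
 * within a single page.
 */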

static int skcipher_next_copy(struct skcipher_walk *walk)
{
        struct skcipher_walk_buffer *p;
        u8 *tmp = walk->page;

        skcipher_map_src(walk);
        memcpy(tmp, walk->src.virt.addr, walk->nbytes);
        skcipher_unmap_src(walk);

        walk->src.virt.addr = tmp;
        walk->dst.virt.addr = tmp;

        if (!(walk->flags & SKCIPHER_WALK_PHYS))
                return 0;

        p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
        if (!p)
                return -ENOMEM;

        p->data = walk->page;
        p->len = walk->nbytes;
        skcipher_queue_write(walk, p);

        if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
            PAGE_SIZE)
                walk->page = NULL;
        else
                walk->page += walk->nbytes;

        return 0;
}

static int skcipher_next_fast(struct skcipher_walk *walk)
{
        unsigned long diff;

        walk->src.phys.page = scatterwalk_page(&walk->in);
        walk->src.phys.offset = offset_in_page(walk->in.offset);
        walk->dst.phys.page = scatterwalk_page(&walk->out);
        walk->dst.phys.offset = offset_in_page(walk->out.offset);

        if (walk->flags & SKCIPHER_WALK_PHYS)
                return 0;

        diff = walk->src.phys.offset - walk->dst.phys.offset;
        diff |= walk->src.virt.page - walk->dst.virt.page;

        skcipher_map_src(walk);
        walk->dst.virt.addr = walk->src.virt.addr;

        if (diff) {
                walk->flags |= SKCIPHER_WALK_DIFF;
                skcipher_map_dst(walk);
        }

        return 0;
}

static int skcipher_walk_next(struct skcipher_walk *walk)
{
        unsigned int bsize;
        unsigned int n;
        int err;

        walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
                         SKCIPHER_WALK_DIFF);

        n = walk->total;
        bsize = min(walk->stride, max(n, walk->blocksize));
        n = scatterwalk_clamp(&walk->in, n);
        n = scatterwalk_clamp(&walk->out, n);

        if (unlikely(n < bsize)) {
                if (unlikely(walk->total < walk->blocksize))
                        return skcipher_walk_done(walk, -EINVAL);

slow_path:
                err = skcipher_next_slow(walk, bsize);
                goto set_phys_lowmem;
        }

        if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
                if (!walk->page) {
                        gfp_t gfp = skcipher_walk_gfp(walk);

                        walk->page = (void *)__get_free_page(gfp);
                        if (!walk->page)
                                goto slow_path;
                }

                walk->nbytes = min_t(unsigned, n,
                                     PAGE_SIZE - offset_in_page(walk->page));
                walk->flags |= SKCIPHER_WALK_COPY;
                err = skcipher_next_copy(walk);
                goto set_phys_lowmem;
        }

        walk->nbytes = n;

        return skcipher_next_fast(walk);

set_phys_lowmem:
        if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
                walk->src.phys.page = virt_to_page(walk->src.virt.addr);
                walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
                walk->src.phys.offset &= PAGE_SIZE - 1;
                walk->dst.phys.offset &= PAGE_SIZE - 1;
        }
        return err;
}

static int skcipher_copy_iv(struct skcipher_walk *walk)
{
        unsigned a = crypto_tfm_ctx_alignment() - 1;
        unsigned alignmask = walk->alignmask;
        unsigned ivsize = walk->ivsize;
        unsigned bs = walk->stride;
        unsigned aligned_bs;
        unsigned size;
        u8 *iv;
        aligned_bs = ALIGN(bs, alignmask + 1);

        /* Minimum size to align buffer by alignmask. */
        size = alignmask & ~a;

        if (walk->flags & SKCIPHER_WALK_PHYS)
                size += ivsize;
        else {
                size += aligned_bs + ivsize;

                /* Minimum size to ensure buffer does not straddle a page. */
                size += (bs - 1) & ~(alignmask | a);
        }

        walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
        if (!walk->buffer)
                return -ENOMEM;

        iv = PTR_ALIGN(walk->buffer, alignmask + 1);
        iv = skcipher_get_spot(iv, bs) + aligned_bs;

        walk->iv = memcpy(iv, walk->iv, walk->ivsize);
        return 0;
}

static int skcipher_walk_first(struct skcipher_walk *walk)
{
        if (WARN_ON_ONCE(in_irq()))
                return -EDEADLK;

        walk->buffer = NULL;
        if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
                int err = skcipher_copy_iv(walk);
                if (err)
                        return err;
        }

        walk->page = NULL;
        walk->nbytes = walk->total;

        return skcipher_walk_next(walk);
}

static int skcipher_walk_skcipher(struct skcipher_walk *walk,
                                  struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

        walk->total = req->cryptlen;
        walk->nbytes = 0;
        walk->iv = req->iv;
        walk->oiv = req->iv;

        if (unlikely(!walk->total))
                return 0;

        scatterwalk_start(&walk->in, req->src);
        scatterwalk_start(&walk->out, req->dst);

        walk->flags &= ~SKCIPHER_WALK_SLEEP;
        walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                       SKCIPHER_WALK_SLEEP : 0;

        walk->blocksize = crypto_skcipher_blocksize(tfm);
        walk->stride = crypto_skcipher_walksize(tfm);
        walk->ivsize = crypto_skcipher_ivsize(tfm);
        walk->alignmask = crypto_skcipher_alignmask(tfm);

        return skcipher_walk_first(walk);
}

int skcipher_walk_virt(struct skcipher_walk *walk,
                       struct skcipher_request *req, bool atomic)
{
        int err;

        walk->flags &= ~SKCIPHER_WALK_PHYS;

        err = skcipher_walk_skcipher(walk, req);

        walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

        return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);
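
/*
 * A minimal sketch (not part of this file) of how a synchronous cipher
 * implementation typically drives the walk; my_alg_encrypt(),
 * my_cipher_crypt_blocks() and MY_BLOCK_SIZE are hypothetical
 * stand-ins for real driver code:
 *
 *      static int my_alg_encrypt(struct skcipher_request *req)
 *      {
 *              struct skcipher_walk walk;
 *              unsigned int nbytes;
 *              int err;
 *
 *              err = skcipher_walk_virt(&walk, req, false);
 *              while ((nbytes = walk.nbytes) != 0) {
 *                      my_cipher_crypt_blocks(walk.dst.virt.addr,
 *                                             walk.src.virt.addr,
 *                                             nbytes - nbytes % MY_BLOCK_SIZE,
 *                                             walk.iv);
 *                      err = skcipher_walk_done(&walk,
 *                                               nbytes % MY_BLOCK_SIZE);
 *              }
 *              return err;
 *      }
 *
 * The second argument to skcipher_walk_done() is the number of bytes
 * left unprocessed in this chunk, 0 when everything was consumed.
 */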

void skcipher_walk_atomise(struct skcipher_walk *walk)
{
        walk->flags &= ~SKCIPHER_WALK_SLEEP;
}
EXPORT_SYMBOL_GPL(skcipher_walk_atomise);

int skcipher_walk_async(struct skcipher_walk *walk,
                        struct skcipher_request *req)
{
        walk->flags |= SKCIPHER_WALK_PHYS;

        INIT_LIST_HEAD(&walk->buffers);

        return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);

static int skcipher_walk_aead_common(struct skcipher_walk *walk,
                                     struct aead_request *req, bool atomic)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        int err;

        walk->nbytes = 0;
        walk->iv = req->iv;
        walk->oiv = req->iv;

        if (unlikely(!walk->total))
                return 0;

        walk->flags &= ~SKCIPHER_WALK_PHYS;

        scatterwalk_start(&walk->in, req->src);
        scatterwalk_start(&walk->out, req->dst);

        scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
        scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

        scatterwalk_done(&walk->in, 0, walk->total);
        scatterwalk_done(&walk->out, 0, walk->total);

        if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
                walk->flags |= SKCIPHER_WALK_SLEEP;
        else
                walk->flags &= ~SKCIPHER_WALK_SLEEP;

        walk->blocksize = crypto_aead_blocksize(tfm);
        walk->stride = crypto_aead_chunksize(tfm);
        walk->ivsize = crypto_aead_ivsize(tfm);
        walk->alignmask = crypto_aead_alignmask(tfm);

        err = skcipher_walk_first(walk);

        if (atomic)
                walk->flags &= ~SKCIPHER_WALK_SLEEP;

        return err;
}

int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
                       bool atomic)
{
        walk->total = req->cryptlen;

        return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead);

int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
                               struct aead_request *req, bool atomic)
{
        walk->total = req->cryptlen;

        return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
                               struct aead_request *req, bool atomic)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);

        walk->total = req->cryptlen - crypto_aead_authsize(tfm);

        return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);
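
/*
 * Sketch of the AEAD side (illustrative only; my_aead_crypt_chunk() is
 * a hypothetical helper): the common setup above has already skipped
 * req->assoclen bytes of associated data in both scatterlists, so a
 * walker-based AEAD only ever sees the crypto region:
 *
 *      err = skcipher_walk_aead_encrypt(&walk, req, false);
 *      while (walk.nbytes) {
 *              my_aead_crypt_chunk(ctx, &walk);
 *              err = skcipher_walk_done(&walk, 0);
 *      }
 *
 * For decryption, walk->total excludes the authentication tag, which
 * the AEAD implementation verifies separately.
 */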

static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
        if (alg->cra_type == &crypto_blkcipher_type)
                return sizeof(struct crypto_blkcipher *);

        if (alg->cra_type == &crypto_ablkcipher_type ||
            alg->cra_type == &crypto_givcipher_type)
                return sizeof(struct crypto_ablkcipher *);

        return crypto_alg_extsize(alg);
}

static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
                                     const u8 *key, unsigned int keylen)
{
        struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
        struct crypto_blkcipher *blkcipher = *ctx;
        int err;

        crypto_blkcipher_clear_flags(blkcipher, ~0);
        crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) &
                                              CRYPTO_TFM_REQ_MASK);
        err = crypto_blkcipher_setkey(blkcipher, key, keylen);
        crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
                                       CRYPTO_TFM_RES_MASK);
        if (err)
                return err;

        crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
        return 0;
}

static int skcipher_crypt_blkcipher(struct skcipher_request *req,
                                    int (*crypt)(struct blkcipher_desc *,
                                                 struct scatterlist *,
                                                 struct scatterlist *,
                                                 unsigned int))
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
        struct blkcipher_desc desc = {
                .tfm = *ctx,
                .info = req->iv,
                .flags = req->base.flags,
        };

        return crypt(&desc, req->dst, req->src, req->cryptlen);
}

static int skcipher_encrypt_blkcipher(struct skcipher_request *req)
{
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

        return skcipher_crypt_blkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_blkcipher(struct skcipher_request *req)
{
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

        return skcipher_crypt_blkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
        struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);

        crypto_free_blkcipher(*ctx);
}

static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
        struct crypto_alg *calg = tfm->__crt_alg;
        struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
        struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
        struct crypto_blkcipher *blkcipher;
        struct crypto_tfm *btfm;

        if (!crypto_mod_get(calg))
                return -EAGAIN;

        btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER,
                                        CRYPTO_ALG_TYPE_MASK);
        if (IS_ERR(btfm)) {
                crypto_mod_put(calg);
                return PTR_ERR(btfm);
        }

        blkcipher = __crypto_blkcipher_cast(btfm);
        *ctx = blkcipher;
        tfm->exit = crypto_exit_skcipher_ops_blkcipher;

        skcipher->setkey = skcipher_setkey_blkcipher;
        skcipher->encrypt = skcipher_encrypt_blkcipher;
        skcipher->decrypt = skcipher_decrypt_blkcipher;

        skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
        skcipher->keysize = calg->cra_blkcipher.max_keysize;

        if (skcipher->keysize)
                crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);

        return 0;
}

static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
                                      const u8 *key, unsigned int keylen)
{
        struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
        struct crypto_ablkcipher *ablkcipher = *ctx;
        int err;

        crypto_ablkcipher_clear_flags(ablkcipher, ~0);
        crypto_ablkcipher_set_flags(ablkcipher,
                                    crypto_skcipher_get_flags(tfm) &
                                    CRYPTO_TFM_REQ_MASK);
        err = crypto_ablkcipher_setkey(ablkcipher, key, keylen);
        crypto_skcipher_set_flags(tfm,
                                  crypto_ablkcipher_get_flags(ablkcipher) &
                                  CRYPTO_TFM_RES_MASK);
        if (err)
                return err;

        crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
        return 0;
}

static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
                                     int (*crypt)(struct ablkcipher_request *))
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
        struct ablkcipher_request *subreq = skcipher_request_ctx(req);

        ablkcipher_request_set_tfm(subreq, *ctx);
        ablkcipher_request_set_callback(subreq, skcipher_request_flags(req),
                                        req->base.complete, req->base.data);
        ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
                                     req->iv);

        return crypt(subreq);
}

static int skcipher_encrypt_ablkcipher(struct skcipher_request *req)
{
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
        struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

        return skcipher_crypt_ablkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_ablkcipher(struct skcipher_request *req)
{
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
        struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

        return skcipher_crypt_ablkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
        struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);

        crypto_free_ablkcipher(*ctx);
}

static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
        struct crypto_alg *calg = tfm->__crt_alg;
        struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
        struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
        struct crypto_ablkcipher *ablkcipher;
        struct crypto_tfm *abtfm;

        if (!crypto_mod_get(calg))
                return -EAGAIN;

        abtfm = __crypto_alloc_tfm(calg, 0, 0);
        if (IS_ERR(abtfm)) {
                crypto_mod_put(calg);
                return PTR_ERR(abtfm);
        }

        ablkcipher = __crypto_ablkcipher_cast(abtfm);
        *ctx = ablkcipher;
        tfm->exit = crypto_exit_skcipher_ops_ablkcipher;

        skcipher->setkey = skcipher_setkey_ablkcipher;
        skcipher->encrypt = skcipher_encrypt_ablkcipher;
        skcipher->decrypt = skcipher_decrypt_ablkcipher;

        skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
        skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
                            sizeof(struct ablkcipher_request);
        skcipher->keysize = calg->cra_ablkcipher.max_keysize;

        if (skcipher->keysize)
                crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);

        return 0;
}

static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
                                     const u8 *key, unsigned int keylen)
{
        unsigned long alignmask = crypto_skcipher_alignmask(tfm);
        struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
        u8 *buffer, *alignbuffer;
        unsigned long absize;
        int ret;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_ATOMIC);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = cipher->setkey(tfm, alignbuffer, keylen);
        kfree_sensitive(buffer);
        return ret;
}

static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
                           unsigned int keylen)
{
        struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
        unsigned long alignmask = crypto_skcipher_alignmask(tfm);
        int err;

        if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
                crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        if ((unsigned long)key & alignmask)
                err = skcipher_setkey_unaligned(tfm, key, keylen);
        else
                err = cipher->setkey(tfm, key, keylen);

        if (err)
                return err;

        crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
        return 0;
}

static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
        struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
        struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

        alg->exit(skcipher);
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
        struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

        if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
                return crypto_init_skcipher_ops_blkcipher(tfm);

        if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type ||
            tfm->__crt_alg->cra_type == &crypto_givcipher_type)
                return crypto_init_skcipher_ops_ablkcipher(tfm);

        skcipher->setkey = skcipher_setkey;
        skcipher->encrypt = alg->encrypt;
        skcipher->decrypt = alg->decrypt;
        skcipher->ivsize = alg->ivsize;
        skcipher->keysize = alg->max_keysize;

        if (skcipher->keysize)
                crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);

        if (alg->exit)
                skcipher->base.exit = crypto_skcipher_exit_tfm;

        if (alg->init)
                return alg->init(skcipher);

        return 0;
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
        struct skcipher_instance *skcipher =
                container_of(inst, struct skcipher_instance, s.base);

        skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
        __maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
        struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
                                                     base);

        seq_printf(m, "type         : skcipher\n");
        seq_printf(m, "async        : %s\n",
                   alg->cra_flags & CRYPTO_ALG_ASYNC ?  "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
        seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
        seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
        seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
        seq_printf(m, "walksize     : %u\n", skcipher->walksize);
}

#ifdef CONFIG_NET
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_blkcipher rblkcipher;
        struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
                                                     base);

        strncpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
        strncpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

        rblkcipher.blocksize = alg->cra_blocksize;
        rblkcipher.min_keysize = skcipher->min_keysize;
        rblkcipher.max_keysize = skcipher->max_keysize;
        rblkcipher.ivsize = skcipher->ivsize;

        if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
                    sizeof(struct crypto_report_blkcipher), &rblkcipher))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
#else
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif

static const struct crypto_type crypto_skcipher_type2 = {
        .extsize = crypto_skcipher_extsize,
        .init_tfm = crypto_skcipher_init_tfm,
        .free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
        .show = crypto_skcipher_show,
#endif
        .report = crypto_skcipher_report,
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
        .type = CRYPTO_ALG_TYPE_SKCIPHER,
        .tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
                          const char *name, u32 type, u32 mask)
{
        spawn->base.frontend = &crypto_skcipher_type2;
        return crypto_grab_spawn(&spawn->base, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
                                              u32 type, u32 mask)
{
        return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);
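
/*
 * Typical caller-side usage (a condensed sketch; key, iv, buf and len
 * are placeholders and error handling is abbreviated):
 *
 *      struct crypto_skcipher *tfm;
 *      struct skcipher_request *req;
 *      DECLARE_CRYPTO_WAIT(wait);
 *      struct scatterlist sg;
 *      int err;
 *
 *      tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *      if (IS_ERR(tfm))
 *              return PTR_ERR(tfm);
 *      crypto_skcipher_setkey(tfm, key, 16);
 *      req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *      sg_init_one(&sg, buf, len);
 *      skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *                                    crypto_req_done, &wait);
 *      skcipher_request_set_crypt(req, &sg, &sg, len, iv);
 *      err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *      skcipher_request_free(req);
 *      crypto_free_skcipher(tfm);
 */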

struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
                                const char *alg_name, u32 type, u32 mask)
{
        struct crypto_skcipher *tfm;

        /* Only sync algorithms allowed. */
        mask |= CRYPTO_ALG_ASYNC;

        tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);

        /*
         * Make sure we do not allocate something that might get used with
         * an on-stack request: check the request size.
         */
        if (!IS_ERR(tfm) && WARN_ON(crypto_skcipher_reqsize(tfm) >
                                    MAX_SYNC_SKCIPHER_REQSIZE)) {
                crypto_free_skcipher(tfm);
                return ERR_PTR(-EINVAL);
        }

        return (struct crypto_sync_skcipher *)tfm;
}
EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);

int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask)
{
        return crypto_type_has_alg(alg_name, &crypto_skcipher_type2,
                                   type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher2);

static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
        struct crypto_alg *base = &alg->base;

        if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
            alg->walksize > PAGE_SIZE / 8)
                return -EINVAL;

        if (!alg->chunksize)
                alg->chunksize = base->cra_blocksize;
        if (!alg->walksize)
                alg->walksize = alg->chunksize;

        base->cra_type = &crypto_skcipher_type2;
        base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
        base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

        return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
        struct crypto_alg *base = &alg->base;
        int err;

        err = skcipher_prepare_alg(alg);
        if (err)
                return err;

        return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
        crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
        int i, ret;

        for (i = 0; i < count; i++) {
                ret = crypto_register_skcipher(&algs[i]);
                if (ret)
                        goto err;
        }

        return 0;

err:
        for (--i; i >= 0; --i)
                crypto_unregister_skcipher(&algs[i]);

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);
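
/*
 * Registration sketch (hypothetical algorithm, context and callbacks,
 * shown only to illustrate the API):
 *
 *      static struct skcipher_alg my_algs[] = { {
 *              .base.cra_name          = "cbc(myalg)",
 *              .base.cra_driver_name   = "cbc-myalg-generic",
 *              .base.cra_priority      = 100,
 *              .base.cra_blocksize     = 16,
 *              .base.cra_ctxsize       = sizeof(struct my_ctx),
 *              .base.cra_module        = THIS_MODULE,
 *              .min_keysize            = 16,
 *              .max_keysize            = 32,
 *              .ivsize                 = 16,
 *              .setkey                 = my_setkey,
 *              .encrypt                = my_encrypt,
 *              .decrypt                = my_decrypt,
 *      } };
 *
 *      static int __init my_mod_init(void)
 *      {
 *              return crypto_register_skciphers(my_algs,
 *                                               ARRAY_SIZE(my_algs));
 *      }
 *
 *      static void __exit my_mod_exit(void)
 *      {
 *              crypto_unregister_skciphers(my_algs, ARRAY_SIZE(my_algs));
 *      }
 */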

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
        int i;

        for (i = count - 1; i >= 0; --i)
                crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
                           struct skcipher_instance *inst)
{
        int err;

        err = skcipher_prepare_alg(&inst->alg);
        if (err)
                return err;

        return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");