linux/crypto/ablkcipher.c
/*
 * Asynchronous block chaining cipher operations.
 *
 * This is the asynchronous version of blkcipher.c, indicating completion
 * via a callback.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include <crypto/scatterwalk.h>

#include "internal.h"

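/*
 * Bookkeeping for the slow path: output produced into a bounce buffer
 * is described by one of these records and queued on walk->buffers,
 * together with the scatterwalk position it must be copied back to
 * once the operation has completed.
 */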
struct ablkcipher_buffer {
        struct list_head        entry;
        struct scatter_walk     dst;
        unsigned int            len;
        void                    *data;
};

enum {
        ABLKCIPHER_WALK_SLOW = 1 << 0,
};

static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
{
        scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
}

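/*
 * Write out and free every bounce buffer still queued on the walk.
 * This must run only after the cipher operation has produced its
 * output in the bounce buffers.
 */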
void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
        struct ablkcipher_buffer *p, *tmp;

        list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
                ablkcipher_buffer_write(p);
                list_del(&p->entry);
                kfree(p);
        }
}
EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);

static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
                                          struct ablkcipher_buffer *p)
{
        p->dst = walk->out;
        list_add_tail(&p->entry, &walk->buffers);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
{
        u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

        return max(start, end_page);
}

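/*
 * Slow-path step completion: advance the output walk past the bsize
 * bytes just handled, moving to the next scatterlist entry whenever
 * the current page is exhausted.  The data itself reaches the
 * destination later, when the queued bounce buffer is flushed by
 * __ablkcipher_walk_complete().
 */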
static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
                                                unsigned int bsize)
{
        unsigned int n = bsize;

        for (;;) {
                unsigned int len_this_page = scatterwalk_pagelen(&walk->out);

                if (len_this_page > n)
                        len_this_page = n;
                scatterwalk_advance(&walk->out, n);
                if (n == len_this_page)
                        break;
                n -= len_this_page;
                scatterwalk_start(&walk->out, sg_next(walk->out.sg));
        }

        return bsize;
}

static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
                                                unsigned int n)
{
        scatterwalk_advance(&walk->in, n);
        scatterwalk_advance(&walk->out, n);

        return n;
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
                                struct ablkcipher_walk *walk);

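/*
 * Finish the current step of the walk.  @err is the number of bytes the
 * cipher left unprocessed (zero in the common case), or a negative
 * error code.  The walks are advanced past the processed bytes and, if
 * data remains, the next step is set up; otherwise the final IV is
 * copied back to req->info (if a bounce copy was used) and the IV
 * buffer is freed.
 */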
int ablkcipher_walk_done(struct ablkcipher_request *req,
                         struct ablkcipher_walk *walk, int err)
{
        struct crypto_tfm *tfm = req->base.tfm;
        unsigned int nbytes = 0;

        if (likely(err >= 0)) {
                unsigned int n = walk->nbytes - err;

                if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
                        n = ablkcipher_done_fast(walk, n);
                else if (WARN_ON(err)) {
                        err = -EINVAL;
                        goto err;
                } else
                        n = ablkcipher_done_slow(walk, n);

                nbytes = walk->total - n;
                err = 0;
        }

        scatterwalk_done(&walk->in, 0, nbytes);
        scatterwalk_done(&walk->out, 1, nbytes);

err:
        walk->total = nbytes;
        walk->nbytes = nbytes;

        if (nbytes) {
                crypto_yield(req->base.flags);
                return ablkcipher_walk_next(req, walk);
        }

        if (walk->iv != req->info)
                memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
        kfree(walk->iv_buffer);

        return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);

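/*
 * Slow path: the next chunk is misaligned or smaller than what is
 * contiguously available in the scatterlists, so bounce it through a
 * properly aligned buffer.  Header, data and alignment slack are
 * carved out of a single GFP_ATOMIC allocation, and the buffer is
 * queued for copy-back once the operation completes.
 */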
static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
                                       struct ablkcipher_walk *walk,
                                       unsigned int bsize,
                                       unsigned int alignmask,
                                       void **src_p, void **dst_p)
{
        unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
        struct ablkcipher_buffer *p;
        void *src, *dst, *base;
        unsigned int n;

        n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
        n += (aligned_bsize * 3 - (alignmask + 1) +
              (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));

        p = kmalloc(n, GFP_ATOMIC);
        if (!p)
                return ablkcipher_walk_done(req, walk, -ENOMEM);

        base = p + 1;

        dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
        src = dst = ablkcipher_get_spot(dst, bsize);

        p->len = bsize;
        p->data = dst;

        scatterwalk_copychunks(src, &walk->in, bsize, 0);

        ablkcipher_queue_write(walk, p);

        walk->nbytes = bsize;
        walk->flags |= ABLKCIPHER_WALK_SLOW;

        *src_p = src;
        *dst_p = dst;

        return 0;
}

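/*
 * The caller's IV is not aligned to the cipher's alignmask, so build an
 * aligned copy for the walk to use; ablkcipher_walk_done() copies the
 * final IV back into the request when the walk ends.
 */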
static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
                                     struct crypto_tfm *tfm,
                                     unsigned int alignmask)
{
        unsigned bs = walk->blocksize;
        unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
        unsigned aligned_bs = ALIGN(bs, alignmask + 1);
        unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
                            (alignmask + 1);
        u8 *iv;

        size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
        walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
        if (!walk->iv_buffer)
                return -ENOMEM;

        iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
        iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
        iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
        iv = ablkcipher_get_spot(iv, ivsize);

        walk->iv = memcpy(iv, walk->iv, ivsize);
        return 0;
}

static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
                                       struct ablkcipher_walk *walk)
{
        walk->src.page = scatterwalk_page(&walk->in);
        walk->src.offset = offset_in_page(walk->in.offset);
        walk->dst.page = scatterwalk_page(&walk->out);
        walk->dst.offset = offset_in_page(walk->out.offset);

        return 0;
}

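/*
 * Set up the next step: clamp the chunk to what both scatterlists can
 * provide contiguously, then take the fast path (map the scatterlist
 * pages directly) or, on misalignment or a short chunk, the slow
 * bounce-buffer path.
 */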
static int ablkcipher_walk_next(struct ablkcipher_request *req,
                                struct ablkcipher_walk *walk)
{
        struct crypto_tfm *tfm = req->base.tfm;
        unsigned int alignmask, bsize, n;
        void *src, *dst;
        int err;

        alignmask = crypto_tfm_alg_alignmask(tfm);
        n = walk->total;
        if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
                req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
                return ablkcipher_walk_done(req, walk, -EINVAL);
        }

        walk->flags &= ~ABLKCIPHER_WALK_SLOW;
        src = dst = NULL;

        bsize = min(walk->blocksize, n);
        n = scatterwalk_clamp(&walk->in, n);
        n = scatterwalk_clamp(&walk->out, n);

        if (n < bsize ||
            !scatterwalk_aligned(&walk->in, alignmask) ||
            !scatterwalk_aligned(&walk->out, alignmask)) {
                err = ablkcipher_next_slow(req, walk, bsize, alignmask,
                                           &src, &dst);
                goto set_phys_lowmem;
        }

        walk->nbytes = n;

        return ablkcipher_next_fast(req, walk);

set_phys_lowmem:
        if (err >= 0) {
                walk->src.page = virt_to_page(src);
                walk->dst.page = virt_to_page(dst);
                walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
                walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
        }

        return err;
}

static int ablkcipher_walk_first(struct ablkcipher_request *req,
                                 struct ablkcipher_walk *walk)
{
        struct crypto_tfm *tfm = req->base.tfm;
        unsigned int alignmask;

        alignmask = crypto_tfm_alg_alignmask(tfm);
        if (WARN_ON_ONCE(in_irq()))
                return -EDEADLK;

        walk->iv = req->info;
        walk->nbytes = walk->total;
        if (unlikely(!walk->total))
                return 0;

        walk->iv_buffer = NULL;
        if (unlikely(((unsigned long)walk->iv & alignmask))) {
                int err = ablkcipher_copy_iv(walk, tfm, alignmask);

                if (err)
                        return err;
        }

        scatterwalk_start(&walk->in, walk->in.sg);
        scatterwalk_start(&walk->out, walk->out.sg);

        return ablkcipher_walk_next(req, walk);
}

int ablkcipher_walk_phys(struct ablkcipher_request *req,
                         struct ablkcipher_walk *walk)
{
        walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
        return ablkcipher_walk_first(req, walk);
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);
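
/*
 * A minimal usage sketch of the walk API exported above (not taken from
 * an in-tree driver; dev_process_chunk() and dev are hypothetical).
 * Each step exposes one contiguous chunk via walk.src.page/offset and
 * walk.dst.page/offset; the driver reports the number of bytes it left
 * unprocessed back through ablkcipher_walk_done():
 *
 *      struct ablkcipher_walk walk;
 *      int err;
 *
 *      ablkcipher_walk_init(&walk, req->dst, req->src, req->nbytes);
 *      err = ablkcipher_walk_phys(req, &walk);
 *      while (walk.nbytes) {
 *              dev_process_chunk(dev, &walk);             // hypothetical
 *              err = ablkcipher_walk_done(req, &walk, 0); // 0: all consumed
 *              if (err < 0)
 *                      break;
 *      }
 *      __ablkcipher_walk_complete(&walk);  // flush slow-path buffers
 *
 * ablkcipher_walk_init() is the inline initializer from
 * <crypto/algapi.h>; the final flush must only run once the operation
 * has actually produced its output.
 */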
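/*
 * Key material handed in at an address that violates the cipher's
 * alignment mask is bounced through a temporary aligned buffer, which
 * is zeroed before it is freed.  setkey() below also rejects keys
 * outside [min_keysize, max_keysize] before dispatching to the
 * algorithm's own setkey().
 */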
static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
        unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
        int ret;
        u8 *buffer, *alignbuffer;
        unsigned long absize;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_ATOMIC);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = cipher->setkey(tfm, alignbuffer, keylen);
        memset(alignbuffer, 0, keylen);
        kfree(buffer);
        return ret;
}

static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                  unsigned int keylen)
{
        struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
        unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);

        if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
                crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        if ((unsigned long)key & alignmask)
                return setkey_unaligned(tfm, key, keylen);

        return cipher->setkey(tfm, key, keylen);
}

static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
                                              u32 mask)
{
        return alg->cra_ctxsize;
}

static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
                                      u32 mask)
{
        struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
        struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

        if (alg->ivsize > PAGE_SIZE / 8)
                return -EINVAL;

        crt->setkey = setkey;
        crt->encrypt = alg->encrypt;
        crt->decrypt = alg->decrypt;
        crt->base = __crypto_ablkcipher_cast(tfm);
        crt->ivsize = alg->ivsize;

        return 0;
}

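/*
 * Report this algorithm's parameters to user space over the crypto_user
 * netlink interface; without CONFIG_NET the stub simply returns -ENOSYS.
 */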
#ifdef CONFIG_NET
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_blkcipher rblkcipher;

        strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
        strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
                sizeof(rblkcipher.geniv));

        rblkcipher.blocksize = alg->cra_blocksize;
        rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
        rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
        rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

        if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
                    sizeof(struct crypto_report_blkcipher), &rblkcipher))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
#else
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif

static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
        __maybe_unused;
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
        struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

        seq_printf(m, "type         : ablkcipher\n");
        seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                                             "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
        seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
        seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
        seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<default>");
}

const struct crypto_type crypto_ablkcipher_type = {
        .ctxsize = crypto_ablkcipher_ctxsize,
        .init = crypto_init_ablkcipher_ops,
#ifdef CONFIG_PROC_FS
        .show = crypto_ablkcipher_show,
#endif
        .report = crypto_ablkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);
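
/*
 * An algorithm plugs into this type by registering a struct crypto_alg
 * whose .cra_type points here.  A minimal, abbreviated sketch (the
 * driver name and the my_*() callbacks are hypothetical; fields such as
 * cra_ctxsize and cra_priority are omitted):
 *
 *      static struct crypto_alg my_alg = {
 *              .cra_name        = "cbc(aes)",
 *              .cra_driver_name = "cbc-aes-mydev",
 *              .cra_flags       = CRYPTO_ALG_TYPE_ABLKCIPHER |
 *                                 CRYPTO_ALG_ASYNC,
 *              .cra_blocksize   = 16,
 *              .cra_type        = &crypto_ablkcipher_type,
 *              .cra_module      = THIS_MODULE,
 *              .cra_ablkcipher  = {
 *                      .min_keysize = 16,
 *                      .max_keysize = 32,
 *                      .ivsize      = 16,
 *                      .setkey      = my_setkey,
 *                      .encrypt     = my_encrypt,
 *                      .decrypt     = my_decrypt,
 *              },
 *      };
 *
 * After crypto_register_alg(&my_alg), crypto_init_ablkcipher_ops()
 * above wires these callbacks into each newly allocated tfm.
 */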
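/*
 * Initialisation for "givcipher" (IV generator) instances.  The only
 * difference from crypto_init_ablkcipher_ops() is setkey: when the
 * algorithm carries CRYPTO_ALG_GENIV, its own setkey is installed
 * directly instead of the generic length-checking wrapper above.
 */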
static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
                                      u32 mask)
{
        struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
        struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

        if (alg->ivsize > PAGE_SIZE / 8)
                return -EINVAL;

        crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ?
                      alg->setkey : setkey;
        crt->encrypt = alg->encrypt;
        crt->decrypt = alg->decrypt;
        crt->base = __crypto_ablkcipher_cast(tfm);
        crt->ivsize = alg->ivsize;

        return 0;
}

#ifdef CONFIG_NET
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_blkcipher rblkcipher;

        strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
        strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
                sizeof(rblkcipher.geniv));

        rblkcipher.blocksize = alg->cra_blocksize;
        rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
        rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
        rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

        if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
                    sizeof(struct crypto_report_blkcipher), &rblkcipher))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
#else
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif

static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
        __maybe_unused;
static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
        struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

        seq_printf(m, "type         : givcipher\n");
        seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                                             "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
        seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
        seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
        seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<built-in>");
}

const struct crypto_type crypto_givcipher_type = {
        .ctxsize = crypto_ablkcipher_ctxsize,
        .init = crypto_init_givcipher_ops,
#ifdef CONFIG_PROC_FS
        .show = crypto_givcipher_show,
#endif
        .report = crypto_givcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_givcipher_type);