linux/crypto/ablkcipher.c
/*
 * Asynchronous block chaining cipher operations.
 *
 * This is the asynchronous version of blkcipher.c indicating completion
 * via a callback.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include <crypto/scatterwalk.h>

#include "internal.h"

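/*
 * One queued slow-path write-back: @len bytes at @data (inside the same
 * allocation as this header) that still have to be copied out to the
 * destination scatterlist position saved in @dst.
 */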
struct ablkcipher_buffer {
        struct list_head        entry;
        struct scatter_walk     dst;
        unsigned int            len;
        void                    *data;
};

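/* walk->flags bit: the current chunk is bounced through a temporary buffer */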
enum {
        ABLKCIPHER_WALK_SLOW = 1 << 0,
};

static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
{
        scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
}

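/*
 * Flush all queued slow-path write-backs out to the destination
 * scatterlist and free them.  Drivers call this (typically through the
 * ablkcipher_walk_complete() wrapper) once the request has been fully
 * processed.
 */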
void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
        struct ablkcipher_buffer *p, *tmp;

        list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
                ablkcipher_buffer_write(p);
                list_del(&p->entry);
                kfree(p);
        }
}
EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);

static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
                                          struct ablkcipher_buffer *p)
{
        p->dst = walk->out;
        list_add_tail(&p->entry, &walk->buffers);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
{
        u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

        return max(start, end_page);
}

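/*
 * Slow-path completion: advance the output walk past the @n bounced
 * bytes, moving to the next scatterlist entry whenever a page boundary
 * is crossed.  The data itself is written out later, by
 * __ablkcipher_walk_complete().
 */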
static inline void ablkcipher_done_slow(struct ablkcipher_walk *walk,
                                        unsigned int n)
{
        for (;;) {
                unsigned int len_this_page = scatterwalk_pagelen(&walk->out);

                if (len_this_page > n)
                        len_this_page = n;
                scatterwalk_advance(&walk->out, n);
                if (n == len_this_page)
                        break;
                n -= len_this_page;
                scatterwalk_start(&walk->out, sg_next(walk->out.sg));
        }
}

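/*
 * Fast-path completion: the data was processed directly in the source
 * and destination scatterlists, so only the walk positions need to
 * advance.
 */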
static inline void ablkcipher_done_fast(struct ablkcipher_walk *walk,
                                        unsigned int n)
{
        scatterwalk_advance(&walk->in, n);
        scatterwalk_advance(&walk->out, n);
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
                                struct ablkcipher_walk *walk);

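/*
 * Complete the current step of the walk.  @err is the number of bytes of
 * the current chunk that were left unprocessed (normally 0), or a
 * negative errno to abort.  On success this advances past the processed
 * bytes and sets up the next chunk; once the request is exhausted (or on
 * error) the walk is torn down and any realigned IV is copied back into
 * req->info.
 */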
int ablkcipher_walk_done(struct ablkcipher_request *req,
                         struct ablkcipher_walk *walk, int err)
{
        struct crypto_tfm *tfm = req->base.tfm;
        unsigned int n; /* bytes processed */
        bool more;

        if (unlikely(err < 0))
                goto finish;

        n = walk->nbytes - err;
        walk->total -= n;
        more = (walk->total != 0);

        if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) {
                ablkcipher_done_fast(walk, n);
        } else {
                if (WARN_ON(err)) {
                        /* unexpected case; didn't process all bytes */
                        err = -EINVAL;
                        goto finish;
                }
                ablkcipher_done_slow(walk, n);
        }

        scatterwalk_done(&walk->in, 0, more);
        scatterwalk_done(&walk->out, 1, more);

        if (more) {
                crypto_yield(req->base.flags);
                return ablkcipher_walk_next(req, walk);
        }
        err = 0;
finish:
        walk->nbytes = 0;
        if (walk->iv != req->info)
                memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
        kfree(walk->iv_buffer);
        return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);

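/*
 * Slow path: the next block is misaligned or split across scatterlist
 * entries.  Allocate one buffer holding the ablkcipher_buffer header
 * plus an aligned, non-page-straddling area for the block, gather the
 * input into it, and queue the write-back that will later copy the
 * result out to the destination scatterlist.
 */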
static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
                                       struct ablkcipher_walk *walk,
                                       unsigned int bsize,
                                       unsigned int alignmask,
                                       void **src_p, void **dst_p)
{
        unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
        struct ablkcipher_buffer *p;
        void *src, *dst, *base;
        unsigned int n;

        n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
        n += (aligned_bsize * 3 - (alignmask + 1) +
              (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));

        p = kmalloc(n, GFP_ATOMIC);
        if (!p)
                return ablkcipher_walk_done(req, walk, -ENOMEM);

        base = p + 1;

        dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
        src = dst = ablkcipher_get_spot(dst, bsize);

        p->len = bsize;
        p->data = dst;

        scatterwalk_copychunks(src, &walk->in, bsize, 0);

        ablkcipher_queue_write(walk, p);

        walk->nbytes = bsize;
        walk->flags |= ABLKCIPHER_WALK_SLOW;

        *src_p = src;
        *dst_p = dst;

        return 0;
}

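/*
 * The caller's IV does not meet the cipher's alignment mask: allocate an
 * aligned buffer, copy the IV there and point walk->iv at it.  The
 * (possibly updated) IV is copied back to req->info when the walk ends.
 */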
static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
                                     struct crypto_tfm *tfm,
                                     unsigned int alignmask)
{
        unsigned bs = walk->blocksize;
        unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
        unsigned aligned_bs = ALIGN(bs, alignmask + 1);
        unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
                            (alignmask + 1);
        u8 *iv;

        size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
        walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
        if (!walk->iv_buffer)
                return -ENOMEM;

        iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
        iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
        iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
        iv = ablkcipher_get_spot(iv, ivsize);

        walk->iv = memcpy(iv, walk->iv, ivsize);
        return 0;
}

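/*
 * Fast path: the chunk is suitably aligned and lies within single pages
 * on both sides, so point the caller straight at the scatterlist pages.
 */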
static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
                                       struct ablkcipher_walk *walk)
{
        walk->src.page = scatterwalk_page(&walk->in);
        walk->src.offset = offset_in_page(walk->in.offset);
        walk->dst.page = scatterwalk_page(&walk->out);
        walk->dst.offset = offset_in_page(walk->out.offset);

        return 0;
}

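/*
 * Set up the next chunk of the walk: clamp it to what both scatterlists
 * can supply from their current position, then use the fast in-place
 * path if the chunk is block-sized and aligned, or bounce one block
 * through a temporary buffer otherwise.
 */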
static int ablkcipher_walk_next(struct ablkcipher_request *req,
                                struct ablkcipher_walk *walk)
{
        struct crypto_tfm *tfm = req->base.tfm;
        unsigned int alignmask, bsize, n;
        void *src, *dst;
        int err;

        alignmask = crypto_tfm_alg_alignmask(tfm);
        n = walk->total;
        if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
                req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
                return ablkcipher_walk_done(req, walk, -EINVAL);
        }

        walk->flags &= ~ABLKCIPHER_WALK_SLOW;
        src = dst = NULL;

        bsize = min(walk->blocksize, n);
        n = scatterwalk_clamp(&walk->in, n);
        n = scatterwalk_clamp(&walk->out, n);

        if (n < bsize ||
            !scatterwalk_aligned(&walk->in, alignmask) ||
            !scatterwalk_aligned(&walk->out, alignmask)) {
                err = ablkcipher_next_slow(req, walk, bsize, alignmask,
                                           &src, &dst);
                goto set_phys_lowmem;
        }

        walk->nbytes = n;

        return ablkcipher_next_fast(req, walk);

set_phys_lowmem:
        if (err >= 0) {
                walk->src.page = virt_to_page(src);
                walk->dst.page = virt_to_page(dst);
                walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
                walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
        }

        return err;
}

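/*
 * Begin a walk: this may not be called from hard IRQ context.  The IV
 * is copied to an aligned buffer if the caller's pointer does not meet
 * the cipher's alignment mask, and both scatterwalks are positioned at
 * the start of their lists before the first chunk is produced.
 */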
static int ablkcipher_walk_first(struct ablkcipher_request *req,
                                 struct ablkcipher_walk *walk)
{
        struct crypto_tfm *tfm = req->base.tfm;
        unsigned int alignmask;

        alignmask = crypto_tfm_alg_alignmask(tfm);
        if (WARN_ON_ONCE(in_irq()))
                return -EDEADLK;

        walk->iv = req->info;
        walk->nbytes = walk->total;
        if (unlikely(!walk->total))
                return 0;

        walk->iv_buffer = NULL;
        if (unlikely(((unsigned long)walk->iv & alignmask))) {
                int err = ablkcipher_copy_iv(walk, tfm, alignmask);

                if (err)
                        return err;
        }

        scatterwalk_start(&walk->in, walk->in.sg);
        scatterwalk_start(&walk->out, walk->out.sg);

        return ablkcipher_walk_next(req, walk);
}

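/*
 * Walk a request chunk by chunk, exposing each piece as page + offset
 * pairs in walk->src and walk->dst, for drivers that feed physical
 * addresses to hardware.  A minimal sketch of the calling pattern
 * follows; hw_process() is a hypothetical driver helper returning the
 * number of bytes it consumed (see drivers/crypto/n2_core.c for a real
 * user of this interface):
 *
 *      struct ablkcipher_walk walk;
 *      unsigned int n;
 *      int err;
 *
 *      ablkcipher_walk_init(&walk, req->dst, req->src, req->nbytes);
 *      err = ablkcipher_walk_phys(req, &walk);
 *      while (!err && (n = walk.nbytes) > 0) {
 *              n -= hw_process(walk.src.page, walk.src.offset,
 *                              walk.dst.page, walk.dst.offset, n);
 *              err = ablkcipher_walk_done(req, &walk, n);
 *      }
 *      __ablkcipher_walk_complete(&walk);
 */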
int ablkcipher_walk_phys(struct ablkcipher_request *req,
                         struct ablkcipher_walk *walk)
{
        walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
        return ablkcipher_walk_first(req, walk);
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);

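/*
 * The caller's key buffer is not aligned to the cipher's alignment mask:
 * copy the key into an aligned scratch buffer, set it from there, then
 * wipe and free the copy.
 */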
static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
        unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
        int ret;
        u8 *buffer, *alignbuffer;
        unsigned long absize;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_ATOMIC);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = cipher->setkey(tfm, alignbuffer, keylen);
        memset(alignbuffer, 0, keylen);
        kfree(buffer);
        return ret;
}

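/*
 * Check the key length against the algorithm's limits and dispatch to
 * the cipher's setkey handler, going through an aligned copy of the key
 * when the caller's buffer does not meet the alignment mask.
 */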
static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                  unsigned int keylen)
{
        struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
        unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);

        if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
                crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        if ((unsigned long)key & alignmask)
                return setkey_unaligned(tfm, key, keylen);

        return cipher->setkey(tfm, key, keylen);
}

static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
                                              u32 mask)
{
        return alg->cra_ctxsize;
}

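/*
 * Instantiate the crt_ablkcipher operations of a new transform from its
 * algorithm, rejecting implausibly large IV sizes.
 */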
static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
                                      u32 mask)
{
        struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
        struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

        if (alg->ivsize > PAGE_SIZE / 8)
                return -EINVAL;

        crt->setkey = setkey;
        crt->encrypt = alg->encrypt;
        crt->decrypt = alg->decrypt;
        crt->base = __crypto_ablkcipher_cast(tfm);
        crt->ivsize = alg->ivsize;

        return 0;
}

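/*
 * Report this algorithm's parameters to user space over the crypto_user
 * netlink interface; without CONFIG_NET the stub returns -ENOSYS.
 */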
#ifdef CONFIG_NET
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_blkcipher rblkcipher;

        memset(&rblkcipher, 0, sizeof(rblkcipher));

        strscpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
        strscpy(rblkcipher.geniv, "<default>", sizeof(rblkcipher.geniv));

        rblkcipher.blocksize = alg->cra_blocksize;
        rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
        rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
        rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

        return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
                       sizeof(rblkcipher), &rblkcipher);
}
#else
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif

static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
        __maybe_unused;
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
        struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

        seq_printf(m, "type         : ablkcipher\n");
        seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                                             "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
        seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
        seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
        seq_printf(m, "geniv        : <default>\n");
}

const struct crypto_type crypto_ablkcipher_type = {
        .ctxsize = crypto_ablkcipher_ctxsize,
        .init = crypto_init_ablkcipher_ops,
#ifdef CONFIG_PROC_FS
        .show = crypto_ablkcipher_show,
#endif
        .report = crypto_ablkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);