linux/crypto/ablkcipher.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous block chaining cipher operations.
 *
 * This is the asynchronous version of blkcipher.c, indicating completion
 * via a callback.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */
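/*
 * Typical caller usage, as a minimal sketch (helpers are from the old
 * ablkcipher interface in include/linux/crypto.h; my_complete and
 * my_private_data are hypothetical caller-side names, and error handling
 * is omitted):
 *
 *      struct crypto_ablkcipher *tfm;
 *      struct ablkcipher_request *req;
 *
 *      tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *      crypto_ablkcipher_setkey(tfm, key, keylen);
 *
 *      req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *      ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *                                      my_complete, my_private_data);
 *      ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *
 *      err = crypto_ablkcipher_encrypt(req);
 *
 * A return of -EINPROGRESS (or -EBUSY with the backlog flag) means the
 * request was queued and my_complete() will be called when it finishes.
 */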

#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include <crypto/scatterwalk.h>

#include "internal.h"

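/*
 * A deferred copy-out to the destination scatterlist.  The slow path queues
 * its bounce-buffer writes as these entries so that
 * __ablkcipher_walk_complete() can flush them once the data is final.
 */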
struct ablkcipher_buffer {
        struct list_head        entry;
        struct scatter_walk     dst;
        unsigned int            len;
        void                    *data;
};

enum {
        ABLKCIPHER_WALK_SLOW = 1 << 0,
};

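/* Flush one queued bounce buffer to its destination in the scatterlist. */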
static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
{
        scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
}

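/*
 * Flush and free every pending bounce-buffer write queued on this walk.
 * Used when an asynchronous operation has produced its final output.
 */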
void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
        struct ablkcipher_buffer *p, *tmp;

        list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
                ablkcipher_buffer_write(p);
                list_del(&p->entry);
                kfree(p);
        }
}
EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);

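/* Snapshot the current output position and queue a write-back to it. */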
static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
                                          struct ablkcipher_buffer *p)
{
        p->dst = walk->out;
        list_add_tail(&p->entry, &walk->buffers);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
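/*
 * For example, with 4 KiB pages: a 16-byte spot starting at 0xff8 would
 * straddle the page boundary, so it is bumped to the next page start at
 * 0x1000; a 16-byte spot starting at 0xff0 fits within the page and is
 * returned unchanged.
 */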
static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
{
        u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

        return max(start, end_page);
}

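/*
 * Slow-path completion: the data was produced in a bounce buffer, so only
 * advance the output walk by n bytes, crossing scatterlist entries as
 * needed.  The result is written back later by __ablkcipher_walk_complete().
 */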
static inline void ablkcipher_done_slow(struct ablkcipher_walk *walk,
                                        unsigned int n)
{
        for (;;) {
                unsigned int len_this_page = scatterwalk_pagelen(&walk->out);

                if (len_this_page > n)
                        len_this_page = n;
                scatterwalk_advance(&walk->out, n);
                if (n == len_this_page)
                        break;
                n -= len_this_page;
                scatterwalk_start(&walk->out, sg_next(walk->out.sg));
        }
}

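/* Fast-path completion: the data was processed in place, so simply advance
 * both walks. */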
static inline void ablkcipher_done_fast(struct ablkcipher_walk *walk,
                                        unsigned int n)
{
        scatterwalk_advance(&walk->in, n);
        scatterwalk_advance(&walk->out, n);
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
                                struct ablkcipher_walk *walk);

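/*
 * Finish the current walk step.  @err is the number of bytes the caller
 * left unprocessed, or a negative errno.  Advances the walk and either sets
 * up the next chunk or, when the walk is finished or has failed, copies a
 * bounced IV back into the request and frees the IV buffer.
 */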
int ablkcipher_walk_done(struct ablkcipher_request *req,
                         struct ablkcipher_walk *walk, int err)
{
        struct crypto_tfm *tfm = req->base.tfm;
        unsigned int n; /* bytes processed */
        bool more;

        if (unlikely(err < 0))
                goto finish;

        n = walk->nbytes - err;
        walk->total -= n;
        more = (walk->total != 0);

        if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) {
                ablkcipher_done_fast(walk, n);
        } else {
                if (WARN_ON(err)) {
                        /* unexpected case; didn't process all bytes */
                        err = -EINVAL;
                        goto finish;
                }
                ablkcipher_done_slow(walk, n);
        }

        scatterwalk_done(&walk->in, 0, more);
        scatterwalk_done(&walk->out, 1, more);

        if (more) {
                crypto_yield(req->base.flags);
                return ablkcipher_walk_next(req, walk);
        }
        err = 0;
finish:
        walk->nbytes = 0;
        if (walk->iv != req->info)
                memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
        kfree(walk->iv_buffer);
        return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);

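/*
 * Slow path: the next block is misaligned or straddles scatterlist entries.
 * Allocate an aligned bounce buffer, copy the input block into it, and
 * queue a write-back of the result to the real destination.
 */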
static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
                                       struct ablkcipher_walk *walk,
                                       unsigned int bsize,
                                       unsigned int alignmask,
                                       void **src_p, void **dst_p)
{
        unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
        struct ablkcipher_buffer *p;
        void *src, *dst, *base;
        unsigned int n;

        n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
        n += (aligned_bsize * 3 - (alignmask + 1) +
              (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));

        p = kmalloc(n, GFP_ATOMIC);
        if (!p)
                return ablkcipher_walk_done(req, walk, -ENOMEM);

        base = p + 1;

        dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
        src = dst = ablkcipher_get_spot(dst, bsize);

        p->len = bsize;
        p->data = dst;

        scatterwalk_copychunks(src, &walk->in, bsize, 0);

        ablkcipher_queue_write(walk, p);

        walk->nbytes = bsize;
        walk->flags |= ABLKCIPHER_WALK_SLOW;

        *src_p = src;
        *dst_p = dst;

        return 0;
}

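/*
 * The caller's IV is not sufficiently aligned for this tfm: allocate an
 * aligned buffer, copy the IV into it and walk with the copy instead.
 */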
static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
                                     struct crypto_tfm *tfm,
                                     unsigned int alignmask)
{
        unsigned bs = walk->blocksize;
        unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
        unsigned aligned_bs = ALIGN(bs, alignmask + 1);
        unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
                            (alignmask + 1);
        u8 *iv;

        size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
        walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
        if (!walk->iv_buffer)
                return -ENOMEM;

        iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
        iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
        iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
        iv = ablkcipher_get_spot(iv, ivsize);

        walk->iv = memcpy(iv, walk->iv, ivsize);
        return 0;
}

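/*
 * Fast path: input and output are suitably aligned and contiguous, so
 * expose the underlying pages to the caller directly.
 */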
static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
                                       struct ablkcipher_walk *walk)
{
        walk->src.page = scatterwalk_page(&walk->in);
        walk->src.offset = offset_in_page(walk->in.offset);
        walk->dst.page = scatterwalk_page(&walk->out);
        walk->dst.offset = offset_in_page(walk->out.offset);

        return 0;
}

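/*
 * Set up the next chunk of the walk: take the fast in-place path when
 * alignment allows it and fall back to a bounce buffer otherwise.  For the
 * slow path, the bounce buffer is also translated to page/offset form.
 */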
static int ablkcipher_walk_next(struct ablkcipher_request *req,
                                struct ablkcipher_walk *walk)
{
        struct crypto_tfm *tfm = req->base.tfm;
        unsigned int alignmask, bsize, n;
        void *src, *dst;
        int err;

        alignmask = crypto_tfm_alg_alignmask(tfm);
        n = walk->total;
        if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
                req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
                return ablkcipher_walk_done(req, walk, -EINVAL);
        }

        walk->flags &= ~ABLKCIPHER_WALK_SLOW;
        src = dst = NULL;

        bsize = min(walk->blocksize, n);
        n = scatterwalk_clamp(&walk->in, n);
        n = scatterwalk_clamp(&walk->out, n);

        if (n < bsize ||
            !scatterwalk_aligned(&walk->in, alignmask) ||
            !scatterwalk_aligned(&walk->out, alignmask)) {
                err = ablkcipher_next_slow(req, walk, bsize, alignmask,
                                           &src, &dst);
                goto set_phys_lowmem;
        }

        walk->nbytes = n;

        return ablkcipher_next_fast(req, walk);

set_phys_lowmem:
        if (err >= 0) {
                walk->src.page = virt_to_page(src);
                walk->dst.page = virt_to_page(dst);
                walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
                walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
        }

        return err;
}

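/*
 * Start a walk: set up the IV (bouncing it to an aligned buffer if
 * required), initialise both scatterlist walks and fetch the first chunk.
 * An empty request completes immediately.
 */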
static int ablkcipher_walk_first(struct ablkcipher_request *req,
                                 struct ablkcipher_walk *walk)
{
        struct crypto_tfm *tfm = req->base.tfm;
        unsigned int alignmask;

        alignmask = crypto_tfm_alg_alignmask(tfm);
        if (WARN_ON_ONCE(in_irq()))
                return -EDEADLK;

        walk->iv = req->info;
        walk->nbytes = walk->total;
        if (unlikely(!walk->total))
                return 0;

        walk->iv_buffer = NULL;
        if (unlikely(((unsigned long)walk->iv & alignmask))) {
                int err = ablkcipher_copy_iv(walk, tfm, alignmask);

                if (err)
                        return err;
        }

        scatterwalk_start(&walk->in, walk->in.sg);
        scatterwalk_start(&walk->out, walk->out.sg);

        return ablkcipher_walk_next(req, walk);
}

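/*
 * Walk the request's scatterlists in blocksize chunks, handing the caller
 * page/offset pairs rather than virtual addresses.  Returns 0 or a negative
 * errno.
 */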
int ablkcipher_walk_phys(struct ablkcipher_request *req,
                         struct ablkcipher_walk *walk)
{
        walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
        return ablkcipher_walk_first(req, walk);
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);

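/*
 * The key is not aligned to the algorithm's alignmask: copy it into an
 * aligned heap buffer before handing it to the setkey operation, then wipe
 * and free the copy.
 */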
static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
        unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
        int ret;
        u8 *buffer, *alignbuffer;
        unsigned long absize;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_ATOMIC);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = cipher->setkey(tfm, alignbuffer, keylen);
        memset(alignbuffer, 0, keylen);
        kfree(buffer);
        return ret;
}

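/* Validate the key length and dispatch to the aligned or unaligned path. */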
static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                  unsigned int keylen)
{
        struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
        unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);

        if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
                crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        if ((unsigned long)key & alignmask)
                return setkey_unaligned(tfm, key, keylen);

        return cipher->setkey(tfm, key, keylen);
}

static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
                                              u32 mask)
{
        return alg->cra_ctxsize;
}

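/* Reject oversized IVs and wire an algorithm's ops into a new transform. */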
static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
                                      u32 mask)
{
        struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
        struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

        if (alg->ivsize > PAGE_SIZE / 8)
                return -EINVAL;

        crt->setkey = setkey;
        crt->encrypt = alg->encrypt;
        crt->decrypt = alg->decrypt;
        crt->base = __crypto_ablkcipher_cast(tfm);
        crt->ivsize = alg->ivsize;

        return 0;
}

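/* Report algorithm parameters to userspace via the crypto_user netlink
 * interface. */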
#ifdef CONFIG_NET
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_blkcipher rblkcipher;

        memset(&rblkcipher, 0, sizeof(rblkcipher));

        strscpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
        strscpy(rblkcipher.geniv, "<default>", sizeof(rblkcipher.geniv));

        rblkcipher.blocksize = alg->cra_blocksize;
        rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
        rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
        rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

        return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
                       sizeof(rblkcipher), &rblkcipher);
}
#else
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif

static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
        __maybe_unused;
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
        struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

        seq_printf(m, "type         : ablkcipher\n");
        seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                                             "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
        seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
        seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
        seq_printf(m, "geniv        : <default>\n");
}

const struct crypto_type crypto_ablkcipher_type = {
        .ctxsize = crypto_ablkcipher_ctxsize,
        .init = crypto_init_ablkcipher_ops,
#ifdef CONFIG_PROC_FS
        .show = crypto_ablkcipher_show,
#endif
        .report = crypto_ablkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);
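
/*
 * A driver typically registers an algorithm of this type by pointing its
 * crypto_alg at crypto_ablkcipher_type, roughly as in the following sketch
 * (names prefixed with "my_" are hypothetical):
 *
 *      static struct crypto_alg my_alg = {
 *              .cra_name               = "cbc(aes)",
 *              .cra_driver_name        = "cbc-aes-mydriver",
 *              .cra_priority           = 300,
 *              .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER |
 *                                        CRYPTO_ALG_ASYNC,
 *              .cra_blocksize          = AES_BLOCK_SIZE,
 *              .cra_ctxsize            = sizeof(struct my_ctx),
 *              .cra_type               = &crypto_ablkcipher_type,
 *              .cra_module             = THIS_MODULE,
 *              .cra_u.ablkcipher       = {
 *                      .min_keysize    = AES_MIN_KEY_SIZE,
 *                      .max_keysize    = AES_MAX_KEY_SIZE,
 *                      .ivsize         = AES_BLOCK_SIZE,
 *                      .setkey         = my_setkey,
 *                      .encrypt        = my_encrypt,
 *                      .decrypt        = my_decrypt,
 *              },
 *      };
 *
 * followed by crypto_register_alg(&my_alg).
 */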