linux/include/linux/crypto.h
/*
 * Scatterlist Cryptographic API.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2002 David S. Miller (davem@redhat.com)
 * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
 * and Nettle, by Niels Möller.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#ifndef _LINUX_CRYPTO_H
#define _LINUX_CRYPTO_H

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/*
 * Autoloaded crypto modules should only use a prefixed name to avoid allowing
 * arbitrary modules to be loaded. Loading from userspace may still need the
 * unprefixed names, so this macro retains those aliases as well.
 * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
 * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
 * expands twice on the same line. Instead, use a separate base name for the
 * alias.
 */
#define MODULE_ALIAS_CRYPTO(name)       \
                __MODULE_INFO(alias, alias_userspace, name);    \
                __MODULE_INFO(alias, alias_crypto, "crypto-" name)
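
/*
 * Example (editor's sketch; the algorithm name is illustrative): a module
 * implementing "aes" declares both aliases with a single invocation, creating
 * "aes" for userspace lookups and "crypto-aes" for module autoloading:
 *
 *      MODULE_ALIAS_CRYPTO("aes");
 */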

/*
 * Algorithm masks and types.
 */
#define CRYPTO_ALG_TYPE_MASK            0x0000000f
#define CRYPTO_ALG_TYPE_CIPHER          0x00000001
#define CRYPTO_ALG_TYPE_COMPRESS        0x00000002
#define CRYPTO_ALG_TYPE_AEAD            0x00000003
#define CRYPTO_ALG_TYPE_BLKCIPHER       0x00000004
#define CRYPTO_ALG_TYPE_ABLKCIPHER      0x00000005
#define CRYPTO_ALG_TYPE_GIVCIPHER       0x00000006
#define CRYPTO_ALG_TYPE_DIGEST          0x00000008
#define CRYPTO_ALG_TYPE_HASH            0x00000008
#define CRYPTO_ALG_TYPE_SHASH           0x00000009
#define CRYPTO_ALG_TYPE_AHASH           0x0000000a
#define CRYPTO_ALG_TYPE_RNG             0x0000000c
#define CRYPTO_ALG_TYPE_AKCIPHER        0x0000000d

#define CRYPTO_ALG_TYPE_HASH_MASK       0x0000000e
#define CRYPTO_ALG_TYPE_AHASH_MASK      0x0000000c
#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK  0x0000000c

#define CRYPTO_ALG_LARVAL               0x00000010
#define CRYPTO_ALG_DEAD                 0x00000020
#define CRYPTO_ALG_DYING                0x00000040
#define CRYPTO_ALG_ASYNC                0x00000080

/*
 * Set this bit if and only if the algorithm requires another algorithm of
 * the same type to handle corner cases.
 */
#define CRYPTO_ALG_NEED_FALLBACK        0x00000100

/*
 * This bit is set for symmetric key ciphers that have already been wrapped
 * with a generic IV generator to prevent them from being wrapped again.
 */
#define CRYPTO_ALG_GENIV                0x00000200

/*
 * Set if the algorithm has passed automated run-time testing.  Note that
 * if there is no run-time testing for a given algorithm it is considered
 * to have passed.
 */
#define CRYPTO_ALG_TESTED               0x00000400

/*
 * Set if the algorithm is an instance that is built from templates.
 */
#define CRYPTO_ALG_INSTANCE             0x00000800

/* Set this bit if the algorithm provided is hardware accelerated but
 * not available to userspace via an instruction set or similar means.
 */
#define CRYPTO_ALG_KERN_DRIVER_ONLY     0x00001000

/*
 * Mark a cipher as a service implementation only usable by another
 * cipher and never by a normal user of the kernel crypto API
 */
#define CRYPTO_ALG_INTERNAL             0x00002000

/*
 * Transform masks and values (for crt_flags).
 */
#define CRYPTO_TFM_REQ_MASK             0x000fff00
#define CRYPTO_TFM_RES_MASK             0xfff00000

#define CRYPTO_TFM_REQ_WEAK_KEY         0x00000100
#define CRYPTO_TFM_REQ_MAY_SLEEP        0x00000200
#define CRYPTO_TFM_REQ_MAY_BACKLOG      0x00000400
#define CRYPTO_TFM_RES_WEAK_KEY         0x00100000
#define CRYPTO_TFM_RES_BAD_KEY_LEN      0x00200000
#define CRYPTO_TFM_RES_BAD_KEY_SCHED    0x00400000
#define CRYPTO_TFM_RES_BAD_BLOCK_LEN    0x00800000
#define CRYPTO_TFM_RES_BAD_FLAGS        0x01000000

/*
 * Miscellaneous stuff.
 */
#define CRYPTO_MAX_ALG_NAME             64

/*
 * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
 * declaration) is used to ensure that the crypto_tfm context structure is
 * aligned correctly for the given architecture so that there are no alignment
 * faults for C data types.  In particular, this is required on platforms such
 * as arm where pointers are 32-bit aligned but there are data types such as
 * u64 which require 64-bit alignment.
 */
#define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN

#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))
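
/*
 * Illustration (editor's sketch; the structure and its members are
 * hypothetical): a driver-private context placed in the CRYPTO_MINALIGN_ATTR
 * aligned context area of struct crypto_tfm may therefore safely contain
 * 64-bit members without manual alignment fix-ups, even on 32-bit platforms
 * such as arm:
 *
 *      struct example_cipher_ctx {
 *              u64 processed_bytes;
 *              u8 key[32];
 *      };
 */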

struct scatterlist;
struct crypto_ablkcipher;
struct crypto_async_request;
struct crypto_blkcipher;
struct crypto_tfm;
struct crypto_type;
struct skcipher_givcrypt_request;

typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);

/**
 * DOC: Block Cipher Context Data Structures
 *
 * These data structures define the operating context for each block cipher
 * type.
 */

struct crypto_async_request {
        struct list_head list;
        crypto_completion_t complete;
        void *data;
        struct crypto_tfm *tfm;

        u32 flags;
};

struct ablkcipher_request {
        struct crypto_async_request base;

        unsigned int nbytes;

        void *info;

        struct scatterlist *src;
        struct scatterlist *dst;

        void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

struct blkcipher_desc {
        struct crypto_blkcipher *tfm;
        void *info;
        u32 flags;
};

struct cipher_desc {
        struct crypto_tfm *tfm;
        void (*crfn)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
        unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst,
                             const u8 *src, unsigned int nbytes);
        void *info;
};

/**
 * DOC: Block Cipher Algorithm Definitions
 *
 * These data structures define modular crypto algorithm implementations,
 * managed via crypto_register_alg() and crypto_unregister_alg().
 */

/**
 * struct ablkcipher_alg - asynchronous block cipher definition
 * @min_keysize: Minimum key size supported by the transformation. This is the
 *               smallest key length supported by this transformation algorithm.
 *               This must be set to one of the pre-defined values as this is
 *               not hardware specific. Possible values for this field can be
 *               found via git grep "_MIN_KEY_SIZE" include/crypto/
 * @max_keysize: Maximum key size supported by the transformation. This is the
 *               largest key length supported by this transformation algorithm.
 *               This must be set to one of the pre-defined values as this is
 *               not hardware specific. Possible values for this field can be
 *               found via git grep "_MAX_KEY_SIZE" include/crypto/
 * @setkey: Set key for the transformation. This function is used to either
 *          program a supplied key into the hardware or store the key in the
 *          transformation context for programming it later. Note that this
 *          function does modify the transformation context. This function can
 *          be called multiple times during the existence of the transformation
 *          object, so one must make sure the key is properly reprogrammed into
 *          the hardware. This function is also responsible for checking the key
 *          length for validity. In case a software fallback was put in place in
 *          the @cra_init call, this function might need to use the fallback if
 *          the algorithm doesn't support all of the key sizes.
 * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt
 *           the supplied scatterlist containing the blocks of data. The crypto
 *           API consumer is responsible for aligning the entries of the
 *           scatterlist properly and making sure the chunks are correctly
 *           sized. In case a software fallback was put in place in the
 *           @cra_init call, this function might need to use the fallback if
 *           the algorithm doesn't support all of the key sizes. In case the
 *           key was stored in the transformation context, the key might need
 *           to be re-programmed into the hardware in this function. This
 *           function shall not modify the transformation context, as this
 *           function may be called in parallel with the same transformation
 *           object.
 * @decrypt: Decrypt a scatterlist of blocks. This is the reverse counterpart
 *           to @encrypt, and the conditions are exactly the same.
 * @givencrypt: Update the IV for encryption. With this function, a cipher
 *              implementation may provide the function on how to update the IV
 *              for encryption.
 * @givdecrypt: Update the IV for decryption. This is the reverse of
 *              @givencrypt.
 * @geniv: The transformation implementation may use an "IV generator" provided
 *         by the kernel crypto API. Several use cases have a predefined
 *         approach for how IVs are to be updated. For such use cases, the
 *         kernel crypto API provides ready-to-use implementations that can be
 *         referenced with this variable.
 * @ivsize: IV size applicable for the transformation. The consumer must
 *          provide an IV of exactly that size to perform the encrypt or
 *          decrypt operation.
 *
 * All fields except @givencrypt, @givdecrypt, @geniv and @ivsize are
 * mandatory and must be filled.
 */
struct ablkcipher_alg {
        int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
                      unsigned int keylen);
        int (*encrypt)(struct ablkcipher_request *req);
        int (*decrypt)(struct ablkcipher_request *req);
        int (*givencrypt)(struct skcipher_givcrypt_request *req);
        int (*givdecrypt)(struct skcipher_givcrypt_request *req);

        const char *geniv;

        unsigned int min_keysize;
        unsigned int max_keysize;
        unsigned int ivsize;
};
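
/*
 * A rough sketch of a driver filling in this structure (the example_* names
 * are hypothetical; the structure is embedded via the @cra_u union of struct
 * crypto_alg defined below, and the sizes shown match AES):
 *
 *      static struct ablkcipher_alg example_ablkcipher_alg = {
 *              .setkey         = example_setkey,
 *              .encrypt        = example_encrypt,
 *              .decrypt        = example_decrypt,
 *              .geniv          = "eseqiv",
 *              .min_keysize    = 16,
 *              .max_keysize    = 32,
 *              .ivsize         = 16,
 *      };
 */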

/**
 * struct blkcipher_alg - synchronous block cipher definition
 * @min_keysize: see struct ablkcipher_alg
 * @max_keysize: see struct ablkcipher_alg
 * @setkey: see struct ablkcipher_alg
 * @encrypt: see struct ablkcipher_alg
 * @decrypt: see struct ablkcipher_alg
 * @geniv: see struct ablkcipher_alg
 * @ivsize: see struct ablkcipher_alg
 *
 * All fields except @geniv and @ivsize are mandatory and must be filled.
 */
struct blkcipher_alg {
        int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
                      unsigned int keylen);
        int (*encrypt)(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes);
        int (*decrypt)(struct blkcipher_desc *desc,
                       struct scatterlist *dst, struct scatterlist *src,
                       unsigned int nbytes);

        const char *geniv;

        unsigned int min_keysize;
        unsigned int max_keysize;
        unsigned int ivsize;
};

/**
 * struct cipher_alg - single-block symmetric ciphers definition
 * @cia_min_keysize: Minimum key size supported by the transformation. This is
 *                   the smallest key length supported by this transformation
 *                   algorithm. This must be set to one of the pre-defined
 *                   values as this is not hardware specific. Possible values
 *                   for this field can be found via git grep "_MIN_KEY_SIZE"
 *                   include/crypto/
 * @cia_max_keysize: Maximum key size supported by the transformation. This is
 *                   the largest key length supported by this transformation
 *                   algorithm. This must be set to one of the pre-defined
 *                   values as this is not hardware specific. Possible values
 *                   for this field can be found via git grep "_MAX_KEY_SIZE"
 *                   include/crypto/
 * @cia_setkey: Set key for the transformation. This function is used to either
 *              program a supplied key into the hardware or store the key in the
 *              transformation context for programming it later. Note that this
 *              function does modify the transformation context. This function
 *              can be called multiple times during the existence of the
 *              transformation object, so one must make sure the key is properly
 *              reprogrammed into the hardware. This function is also
 *              responsible for checking the key length for validity.
 * @cia_encrypt: Encrypt a single block. This function is used to encrypt a
 *               single block of data, which must be @cra_blocksize big. This
 *               always operates on a full @cra_blocksize and it is not possible
 *               to encrypt a block of smaller size. The supplied buffers must
 *               therefore also be at least of @cra_blocksize size. Both the
 *               input and output buffers are always aligned to @cra_alignmask.
 *               In case either of the input or output buffer supplied by user
 *               of the crypto API is not aligned to @cra_alignmask, the crypto
 *               API will re-align the buffers. The re-alignment means that a
 *               new buffer will be allocated, the data will be copied into the
 *               new buffer, then the processing will happen on the new buffer,
 *               then the data will be copied back into the original buffer and
 *               finally the new buffer will be freed. In case a software
 *               fallback was put in place in the @cra_init call, this function
 *               might need to use the fallback if the algorithm doesn't support
 *               all of the key sizes. In case the key was stored in
 *               transformation context, the key might need to be re-programmed
 *               into the hardware in this function. This function shall not
 *               modify the transformation context, as this function may be
 *               called in parallel with the same transformation object.
 * @cia_decrypt: Decrypt a single block. This is a reverse counterpart to
 *               @cia_encrypt, and the conditions are exactly the same.
 *
 * All fields are mandatory and must be filled.
 */
struct cipher_alg {
        unsigned int cia_min_keysize;
        unsigned int cia_max_keysize;
        int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
                          unsigned int keylen);
        void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
        void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
};
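
/*
 * A rough sketch of a single-block cipher definition (the example_* callbacks
 * are hypothetical; each cia_encrypt/cia_decrypt call processes exactly one
 * @cra_blocksize sized block):
 *
 *      static struct cipher_alg example_cipher_alg = {
 *              .cia_min_keysize        = 16,
 *              .cia_max_keysize        = 32,
 *              .cia_setkey             = example_cia_setkey,
 *              .cia_encrypt            = example_cia_encrypt,
 *              .cia_decrypt            = example_cia_decrypt,
 *      };
 */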

struct compress_alg {
        int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src,
                            unsigned int slen, u8 *dst, unsigned int *dlen);
        int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src,
                              unsigned int slen, u8 *dst, unsigned int *dlen);
};


#define cra_ablkcipher  cra_u.ablkcipher
#define cra_blkcipher   cra_u.blkcipher
#define cra_cipher      cra_u.cipher
#define cra_compress    cra_u.compress

/**
 * struct crypto_alg - definition of a cryptographic cipher algorithm
 * @cra_flags: Flags describing this transformation. See include/linux/crypto.h
 *             CRYPTO_ALG_* flags for the flags which go in here. Those are
 *             used for fine-tuning the description of the transformation
 *             algorithm.
 * @cra_blocksize: Minimum block size of this transformation. The size in bytes
 *                 of the smallest possible unit which can be transformed with
 *                 this algorithm. The users must respect this value.
 *                 In case of a HASH transformation, it is possible for a
 *                 smaller block than @cra_blocksize to be passed to the crypto
 *                 API for transformation; for any other transformation type,
 *                 an error will be returned upon any attempt to transform
 *                 chunks smaller than @cra_blocksize.
 * @cra_ctxsize: Size of the operational context of the transformation. This
 *               value informs the kernel crypto API about the memory size
 *               needed to be allocated for the transformation context.
 * @cra_alignmask: Alignment mask for the input and output data buffer. The data
 *                 buffer containing the input data for the algorithm must be
 *                 aligned to this alignment mask. The data buffer for the
 *                 output data must be aligned to this alignment mask. Note that
 *                 the Crypto API will do the re-alignment in software, but
 *                 only under special conditions and there is a performance hit.
 *                 The re-alignment happens at these occasions for different
 *                 @cra_u types: cipher -- For both input data and output data
 *                 buffer; ahash -- For output hash destination buf; shash --
 *                 For output hash destination buf.
 *                 This is needed on hardware which is flawed by design and
 *                 cannot pick data from arbitrary addresses.
 * @cra_priority: Priority of this transformation implementation. In case
 *                multiple transformations with the same @cra_name are available
 *                to the Crypto API, the kernel will use the one with the
 *                highest @cra_priority.
 * @cra_name: Generic name (usable by multiple implementations) of the
 *            transformation algorithm. This is the name of the transformation
 *            itself. This field is used by the kernel when looking up the
 *            providers of a particular transformation.
 * @cra_driver_name: Unique name of the transformation provider. This is the
 *                   name of the provider of the transformation. This can be any
 *                   arbitrary value, but in the usual case, this contains the
 *                   name of the chip or provider and the name of the
 *                   transformation algorithm.
 * @cra_type: Type of the cryptographic transformation. This is a pointer to
 *            struct crypto_type, which implements callbacks common for all
 *            transformation types. There are multiple options:
 *            &crypto_blkcipher_type, &crypto_ablkcipher_type,
 *            &crypto_ahash_type, &crypto_rng_type.
 *            This field might be empty. In that case, there are no common
 *            callbacks. This is the case for: cipher, compress, shash.
 * @cra_u: Callbacks implementing the transformation. This is a union of
 *         multiple structures. Depending on the type of transformation selected
 *         by @cra_type and @cra_flags above, the associated structure must be
 *         filled with callbacks. This field might be empty. This is the case
 *         for ahash, shash.
 * @cra_init: Initialize the cryptographic transformation object. This function
 *            is used to initialize the cryptographic transformation object.
 *            This function is called only once at the instantiation time, right
 *            after the transformation context was allocated. In case the
 *            cryptographic hardware has some special requirements which need to
 *            be handled by software, this function shall check for the precise
 *            requirement of the transformation and put any software fallbacks
 *            in place.
 * @cra_exit: Deinitialize the cryptographic transformation object. This is a
 *            counterpart to @cra_init, used to remove various changes set in
 *            @cra_init.
 * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE
 * @cra_list: internally used
 * @cra_users: internally used
 * @cra_refcnt: internally used
 * @cra_destroy: internally used
 *
 * The struct crypto_alg describes a generic Crypto API algorithm and is common
 * for all of the transformations. Any variable not documented here shall not
 * be used by a cipher implementation as it is internal to the Crypto API.
 */
struct crypto_alg {
        struct list_head cra_list;
        struct list_head cra_users;

        u32 cra_flags;
        unsigned int cra_blocksize;
        unsigned int cra_ctxsize;
        unsigned int cra_alignmask;

        int cra_priority;
        atomic_t cra_refcnt;

        char cra_name[CRYPTO_MAX_ALG_NAME];
        char cra_driver_name[CRYPTO_MAX_ALG_NAME];

        const struct crypto_type *cra_type;

        union {
                struct ablkcipher_alg ablkcipher;
                struct blkcipher_alg blkcipher;
                struct cipher_alg cipher;
                struct compress_alg compress;
        } cra_u;

        int (*cra_init)(struct crypto_tfm *tfm);
        void (*cra_exit)(struct crypto_tfm *tfm);
        void (*cra_destroy)(struct crypto_alg *alg);

        struct module *cra_module;
} CRYPTO_MINALIGN_ATTR;

/*
 * Algorithm registration interface.
 */
int crypto_register_alg(struct crypto_alg *alg);
int crypto_unregister_alg(struct crypto_alg *alg);
int crypto_register_algs(struct crypto_alg *algs, int count);
int crypto_unregister_algs(struct crypto_alg *algs, int count);
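
/*
 * A minimal registration sketch (all example_* names, sizes and the context
 * type are hypothetical; a real driver also fills the appropriate @cra_u
 * member, here via the cra_ablkcipher convenience macro defined above):
 *
 *      static struct crypto_alg example_alg = {
 *              .cra_name               = "cbc(aes)",
 *              .cra_driver_name        = "cbc-aes-example",
 *              .cra_priority           = 300,
 *              .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER |
 *                                        CRYPTO_ALG_ASYNC,
 *              .cra_blocksize          = 16,
 *              .cra_ctxsize            = sizeof(struct example_cipher_ctx),
 *              .cra_type               = &crypto_ablkcipher_type,
 *              .cra_module             = THIS_MODULE,
 *              .cra_ablkcipher         = {
 *                      .setkey         = example_setkey,
 *                      .encrypt        = example_encrypt,
 *                      .decrypt        = example_decrypt,
 *                      .min_keysize    = 16,
 *                      .max_keysize    = 32,
 *                      .ivsize         = 16,
 *              },
 *      };
 *
 *      static int __init example_mod_init(void)
 *      {
 *              return crypto_register_alg(&example_alg);
 *      }
 *
 *      static void __exit example_mod_exit(void)
 *      {
 *              crypto_unregister_alg(&example_alg);
 *      }
 */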

/*
 * Algorithm query interface.
 */
int crypto_has_alg(const char *name, u32 type, u32 mask);

/*
 * Transforms: user-instantiated objects which encapsulate algorithms
 * and core processing logic.  Managed via crypto_alloc_*() and
 * crypto_free_*(), as well as the various helpers below.
 */

struct ablkcipher_tfm {
        int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
                      unsigned int keylen);
        int (*encrypt)(struct ablkcipher_request *req);
        int (*decrypt)(struct ablkcipher_request *req);
        int (*givencrypt)(struct skcipher_givcrypt_request *req);
        int (*givdecrypt)(struct skcipher_givcrypt_request *req);

        struct crypto_ablkcipher *base;

        unsigned int ivsize;
        unsigned int reqsize;
};

struct blkcipher_tfm {
        void *iv;
        int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
                      unsigned int keylen);
        int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes);
        int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
                       struct scatterlist *src, unsigned int nbytes);
};

struct cipher_tfm {
        int (*cit_setkey)(struct crypto_tfm *tfm,
                          const u8 *key, unsigned int keylen);
        void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
        void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
};

struct compress_tfm {
        int (*cot_compress)(struct crypto_tfm *tfm,
                            const u8 *src, unsigned int slen,
                            u8 *dst, unsigned int *dlen);
        int (*cot_decompress)(struct crypto_tfm *tfm,
                              const u8 *src, unsigned int slen,
                              u8 *dst, unsigned int *dlen);
};

#define crt_ablkcipher  crt_u.ablkcipher
#define crt_blkcipher   crt_u.blkcipher
#define crt_cipher      crt_u.cipher
#define crt_compress    crt_u.compress

struct crypto_tfm {

        u32 crt_flags;

        union {
                struct ablkcipher_tfm ablkcipher;
                struct blkcipher_tfm blkcipher;
                struct cipher_tfm cipher;
                struct compress_tfm compress;
        } crt_u;

        void (*exit)(struct crypto_tfm *tfm);

        struct crypto_alg *__crt_alg;

        void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
};

struct crypto_ablkcipher {
        struct crypto_tfm base;
};

struct crypto_blkcipher {
        struct crypto_tfm base;
};

struct crypto_cipher {
        struct crypto_tfm base;
};

struct crypto_comp {
        struct crypto_tfm base;
};

enum {
        CRYPTOA_UNSPEC,
        CRYPTOA_ALG,
        CRYPTOA_TYPE,
        CRYPTOA_U32,
        __CRYPTOA_MAX,
};

#define CRYPTOA_MAX (__CRYPTOA_MAX - 1)

/* Maximum number of (rtattr) parameters for each template. */
#define CRYPTO_MAX_ATTRS 32

struct crypto_attr_alg {
        char name[CRYPTO_MAX_ALG_NAME];
};

struct crypto_attr_type {
        u32 type;
        u32 mask;
};

struct crypto_attr_u32 {
        u32 num;
};

/*
 * Transform user interface.
 */

struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);

static inline void crypto_free_tfm(struct crypto_tfm *tfm)
{
        return crypto_destroy_tfm(tfm, tfm);
}

int alg_test(const char *driver, const char *alg, u32 type, u32 mask);

/*
 * Transform helpers which query the underlying algorithm.
 */
static inline const char *crypto_tfm_alg_name(struct crypto_tfm *tfm)
{
        return tfm->__crt_alg->cra_name;
}

static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm)
{
        return tfm->__crt_alg->cra_driver_name;
}

static inline int crypto_tfm_alg_priority(struct crypto_tfm *tfm)
{
        return tfm->__crt_alg->cra_priority;
}

static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
{
        return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
}

static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
{
        return tfm->__crt_alg->cra_blocksize;
}

static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
{
        return tfm->__crt_alg->cra_alignmask;
}

static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm)
{
        return tfm->crt_flags;
}

static inline void crypto_tfm_set_flags(struct crypto_tfm *tfm, u32 flags)
{
        tfm->crt_flags |= flags;
}

static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags)
{
        tfm->crt_flags &= ~flags;
}

static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
{
        return tfm->__crt_ctx;
}

static inline unsigned int crypto_tfm_ctx_alignment(void)
{
        struct crypto_tfm *tfm;
        return __alignof__(tfm->__crt_ctx);
}
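
/*
 * For instance, a driver's @cra_init callback can use crypto_tfm_ctx() to
 * reach its per-tfm context (editor's sketch; the context type is the
 * hypothetical struct example_cipher_ctx from above):
 *
 *      static int example_init_tfm(struct crypto_tfm *tfm)
 *      {
 *              struct example_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
 *
 *              ctx->processed_bytes = 0;
 *              // a driver needing a software fallback would allocate it here
 *              return 0;
 *      }
 */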

/*
 * API wrappers.
 */
static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast(
        struct crypto_tfm *tfm)
{
        return (struct crypto_ablkcipher *)tfm;
}

static inline u32 crypto_skcipher_type(u32 type)
{
        type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
        type |= CRYPTO_ALG_TYPE_BLKCIPHER;
        return type;
}

static inline u32 crypto_skcipher_mask(u32 mask)
{
        mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
        mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK;
        return mask;
}

/**
 * DOC: Asynchronous Block Cipher API
 *
 * The asynchronous block cipher API is used with the ciphers of type
 * CRYPTO_ALG_TYPE_ABLKCIPHER (listed as type "ablkcipher" in /proc/crypto).
 *
 * Asynchronous cipher operations imply that the function invocation for a
 * cipher request returns immediately before the completion of the operation.
 * The cipher request is scheduled as a separate kernel thread and therefore
 * load-balanced on the different CPUs via the process scheduler. To allow
 * the kernel crypto API to inform the caller about the completion of a cipher
 * request, the caller must provide a callback function. That function is
 * invoked with the cipher handle when the request completes.
 *
 * To support the asynchronous operation, more information than just the
 * cipher handle must be supplied to the kernel crypto API. That additional
 * information is given by filling in the ablkcipher_request data structure.
 *
 * For the asynchronous block cipher API, the state is maintained with the tfm
 * cipher handle. A single tfm can be used across multiple calls and in
 * parallel. For asynchronous block cipher calls, context data supplied and
 * only used by the caller can be referenced in the request data structure in
 * addition to the IV used for the cipher request. The maintenance of such
 * state information would be important for a crypto driver implementer to
 * have, because when calling the callback function upon completion of the
 * cipher operation, that callback function may need some information about
 * which operation just finished if multiple operations were invoked in
 * parallel. This state information is unused by the kernel crypto API.
 */
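
/*
 * A common caller-side pattern (editor's sketch; the example_* names are
 * hypothetical and <linux/completion.h> is assumed) bridges the asynchronous
 * callback to a struct completion so a caller can wait synchronously:
 *
 *      struct example_result {
 *              struct completion completion;
 *              int err;
 *      };
 *
 *      static void example_complete(struct crypto_async_request *req, int err)
 *      {
 *              struct example_result *res = req->data;
 *
 *              if (err == -EINPROGRESS)
 *                      return; // backlogged request now in progress
 *              res->err = err;
 *              complete(&res->completion);
 *      }
 */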

/**
 * crypto_alloc_ablkcipher() - allocate asynchronous block cipher handle
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *            ablkcipher cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Allocate a cipher handle for an ablkcipher. The returned struct
 * crypto_ablkcipher is the cipher handle that is required for any subsequent
 * API invocation for that ablkcipher.
 *
 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
 *         of an error, PTR_ERR() returns the error code.
 */
struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
                                                  u32 type, u32 mask);

static inline struct crypto_tfm *crypto_ablkcipher_tfm(
        struct crypto_ablkcipher *tfm)
{
        return &tfm->base;
}

/**
 * crypto_free_ablkcipher() - zeroize and free cipher handle
 * @tfm: cipher handle to be freed
 */
static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm)
{
        crypto_free_tfm(crypto_ablkcipher_tfm(tfm));
}

/**
 * crypto_has_ablkcipher() - Search for the availability of an ablkcipher.
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *            ablkcipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Return: true when the ablkcipher is known to the kernel crypto API; false
 *         otherwise
 */
static inline int crypto_has_ablkcipher(const char *alg_name, u32 type,
                                        u32 mask)
{
        return crypto_has_alg(alg_name, crypto_skcipher_type(type),
                              crypto_skcipher_mask(mask));
}

static inline struct ablkcipher_tfm *crypto_ablkcipher_crt(
        struct crypto_ablkcipher *tfm)
{
        return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher;
}

/**
 * crypto_ablkcipher_ivsize() - obtain IV size
 * @tfm: cipher handle
 *
 * The size of the IV for the ablkcipher referenced by the cipher handle is
 * returned. This IV size may be zero if the cipher does not need an IV.
 *
 * Return: IV size in bytes
 */
static inline unsigned int crypto_ablkcipher_ivsize(
        struct crypto_ablkcipher *tfm)
{
        return crypto_ablkcipher_crt(tfm)->ivsize;
}

/**
 * crypto_ablkcipher_blocksize() - obtain block size of cipher
 * @tfm: cipher handle
 *
 * The block size for the ablkcipher referenced with the cipher handle is
 * returned. The caller may use that information to allocate appropriate
 * memory for the data returned by the encryption or decryption operation.
 *
 * Return: block size of cipher
 */
static inline unsigned int crypto_ablkcipher_blocksize(
        struct crypto_ablkcipher *tfm)
{
        return crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(tfm));
}

static inline unsigned int crypto_ablkcipher_alignmask(
        struct crypto_ablkcipher *tfm)
{
        return crypto_tfm_alg_alignmask(crypto_ablkcipher_tfm(tfm));
}

static inline u32 crypto_ablkcipher_get_flags(struct crypto_ablkcipher *tfm)
{
        return crypto_tfm_get_flags(crypto_ablkcipher_tfm(tfm));
}

static inline void crypto_ablkcipher_set_flags(struct crypto_ablkcipher *tfm,
                                               u32 flags)
{
        crypto_tfm_set_flags(crypto_ablkcipher_tfm(tfm), flags);
}

static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm,
                                                 u32 flags)
{
        crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags);
}

/**
 * crypto_ablkcipher_setkey() - set key for cipher
 * @tfm: cipher handle
 * @key: buffer holding the key
 * @keylen: length of the key in bytes
 *
 * The caller provided key is set for the ablkcipher referenced by the cipher
 * handle.
 *
 * Note, the key length determines the cipher type. Many block ciphers implement
 * different cipher modes depending on the key size, such as AES-128 vs AES-192
 * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
 * is performed.
 *
 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
 */
static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
                                           const u8 *key, unsigned int keylen)
{
        struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(tfm);

        return crt->setkey(crt->base, key, keylen);
}

/**
 * crypto_ablkcipher_reqtfm() - obtain cipher handle from request
 * @req: ablkcipher_request out of which the cipher handle is to be obtained
 *
 * Return the crypto_ablkcipher handle that is registered in an
 * ablkcipher_request data structure.
 *
 * Return: crypto_ablkcipher handle
 */
static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm(
        struct ablkcipher_request *req)
{
        return __crypto_ablkcipher_cast(req->base.tfm);
}

/**
 * crypto_ablkcipher_encrypt() - encrypt plaintext
 * @req: reference to the ablkcipher_request handle that holds all information
 *       needed to perform the cipher operation
 *
 * Encrypt plaintext data using the ablkcipher_request handle. That data
 * structure and how it is filled with data is discussed with the
 * ablkcipher_request_* functions.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
        struct ablkcipher_tfm *crt =
                crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
        return crt->encrypt(req);
}

/**
 * crypto_ablkcipher_decrypt() - decrypt ciphertext
 * @req: reference to the ablkcipher_request handle that holds all information
 *       needed to perform the cipher operation
 *
 * Decrypt ciphertext data using the ablkcipher_request handle. That data
 * structure and how it is filled with data is discussed with the
 * ablkcipher_request_* functions.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
        struct ablkcipher_tfm *crt =
                crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
        return crt->decrypt(req);
}

/**
 * DOC: Asynchronous Cipher Request Handle
 *
 * The ablkcipher_request data structure contains all pointers to data
 * required for the asynchronous cipher operation. This includes the cipher
 * handle (which can be used by multiple ablkcipher_request instances), pointer
 * to plaintext and ciphertext, asynchronous callback function, etc. It acts
 * as a handle to the ablkcipher_request_* API calls in a similar way as
 * ablkcipher handle to the crypto_ablkcipher_* API calls.
 */

/**
 * crypto_ablkcipher_reqsize() - obtain size of the request data structure
 * @tfm: cipher handle
 *
 * Return: number of bytes
 */
static inline unsigned int crypto_ablkcipher_reqsize(
        struct crypto_ablkcipher *tfm)
{
        return crypto_ablkcipher_crt(tfm)->reqsize;
}

/**
 * ablkcipher_request_set_tfm() - update cipher handle reference in request
 * @req: request handle to be modified
 * @tfm: cipher handle that shall be added to the request handle
 *
 * Allow the caller to replace the existing ablkcipher handle in the request
 * data structure with a different one.
 */
static inline void ablkcipher_request_set_tfm(
        struct ablkcipher_request *req, struct crypto_ablkcipher *tfm)
{
        req->base.tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_crt(tfm)->base);
}

static inline struct ablkcipher_request *ablkcipher_request_cast(
        struct crypto_async_request *req)
{
        return container_of(req, struct ablkcipher_request, base);
}

/**
 * ablkcipher_request_alloc() - allocate request data structure
 * @tfm: cipher handle to be registered with the request
 * @gfp: memory allocation flag that is handed to kmalloc by the API call.
 *
 * Allocate the request data structure that must be used with the ablkcipher
 * encrypt and decrypt API calls. During the allocation, the provided ablkcipher
 * handle is registered in the request data structure.
 *
 * Return: allocated request handle in case of success; NULL if the memory
 *         allocation failed
 */
static inline struct ablkcipher_request *ablkcipher_request_alloc(
        struct crypto_ablkcipher *tfm, gfp_t gfp)
{
        struct ablkcipher_request *req;

        req = kmalloc(sizeof(struct ablkcipher_request) +
                      crypto_ablkcipher_reqsize(tfm), gfp);

        if (likely(req))
                ablkcipher_request_set_tfm(req, tfm);

        return req;
}

/**
 * ablkcipher_request_free() - zeroize and free request data structure
 * @req: request data structure cipher handle to be freed
 */
static inline void ablkcipher_request_free(struct ablkcipher_request *req)
{
        kzfree(req);
}

/**
 * ablkcipher_request_set_callback() - set asynchronous callback function
 * @req: request handle
 * @flags: specify zero or an ORing of the flags
 *         CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
 *         increase the wait queue beyond the initial maximum size;
 *         CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
 * @compl: callback function pointer to be registered with the request handle
 * @data: The data pointer refers to memory that is not used by the kernel
 *        crypto API, but provided to the callback function for it to use. Here,
 *        the caller can provide a reference to memory the callback function can
 *        operate on. As the callback function is invoked asynchronously to the
 *        related functionality, it may need to access data structures of the
 *        related functionality which can be referenced using this pointer. The
 *        callback function can access the memory via the "data" field in the
 *        crypto_async_request data structure provided to the callback function.
 *
 * This function allows setting the callback function that is triggered once the
 * cipher operation completes.
 *
 * The callback function is registered with the ablkcipher_request handle and
 * must comply with the following template
 *
 *      void callback_function(struct crypto_async_request *req, int error)
 */
static inline void ablkcipher_request_set_callback(
        struct ablkcipher_request *req,
        u32 flags, crypto_completion_t compl, void *data)
{
        req->base.complete = compl;
        req->base.data = data;
        req->base.flags = flags;
}

/**
 * ablkcipher_request_set_crypt() - set data buffers
 * @req: request handle
 * @src: source scatter / gather list
 * @dst: destination scatter / gather list
 * @nbytes: number of bytes to process from @src
 * @iv: IV for the cipher operation which must comply with the IV size defined
 *      by crypto_ablkcipher_ivsize
 *
 * This function allows setting of the source data and destination data
 * scatter / gather lists.
 *
 * For encryption, the source is treated as the plaintext and the
 * destination is the ciphertext. For a decryption operation, the use is
 * reversed - the source is the ciphertext and the destination is the plaintext.
 */
static inline void ablkcipher_request_set_crypt(
        struct ablkcipher_request *req,
        struct scatterlist *src, struct scatterlist *dst,
        unsigned int nbytes, void *iv)
{
        req->src = src;
        req->dst = dst;
        req->nbytes = nbytes;
        req->info = iv;
}
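
/*
 * Putting the request API together (editor's sketch, reusing the hypothetical
 * example_result/example_complete pattern from the DOC section above):
 *
 *      static int example_encrypt_buf(struct crypto_ablkcipher *tfm,
 *                                     struct scatterlist *src,
 *                                     struct scatterlist *dst,
 *                                     unsigned int nbytes, u8 *iv)
 *      {
 *              struct example_result res;
 *              struct ablkcipher_request *req;
 *              int ret;
 *
 *              req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *              if (!req)
 *                      return -ENOMEM;
 *
 *              init_completion(&res.completion);
 *              ablkcipher_request_set_callback(req,
 *                                              CRYPTO_TFM_REQ_MAY_BACKLOG,
 *                                              example_complete, &res);
 *              ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);
 *
 *              ret = crypto_ablkcipher_encrypt(req);
 *              if (ret == -EINPROGRESS || ret == -EBUSY) {
 *                      wait_for_completion(&res.completion);
 *                      ret = res.err;
 *              }
 *
 *              ablkcipher_request_free(req);
 *              return ret;
 *      }
 */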

/**
 * DOC: Synchronous Block Cipher API
 *
 * The synchronous block cipher API is used with the ciphers of type
 * CRYPTO_ALG_TYPE_BLKCIPHER (listed as type "blkcipher" in /proc/crypto).
 *
 * Synchronous calls have a context in the tfm. But since a single tfm can be
 * used in multiple calls and in parallel, this info should not be changed
 * (unless a lock is used). This applies, for example, to the symmetric key.
 * However, the IV is changeable, so there is an iv field in the blkcipher_tfm
 * structure for the synchronous blkcipher API. It is therefore the only state
 * information that can be kept for synchronous calls without using a big lock
 * across a tfm.
 *
 * The block cipher API allows the use of a complete cipher, i.e. a cipher
 * consisting of a template (a block chaining mode) and a single block cipher
 * primitive (e.g. AES).
 *
 * The plaintext data buffer and the ciphertext data buffer are pointed to
 * by using scatter/gather lists. The cipher operation is performed
 * on all segments of the provided scatter/gather lists.
 *
 * The kernel crypto API supports a cipher operation "in-place" which means that
 * the caller may provide the same scatter/gather list for the plaintext and
 * cipher text. After the completion of the cipher operation, the plaintext
 * data is replaced with the ciphertext data in case of an encryption and vice
 * versa for a decryption. The caller must ensure that the scatter/gather lists
 * for the output data point to sufficiently large buffers, i.e. multiples of
 * the block size of the cipher.
 */
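
/*
 * A minimal synchronous usage sketch (the cipher name and all example_*
 * names are illustrative; error handling is condensed):
 *
 *      static int example_blk_encrypt(struct scatterlist *dst,
 *                                     struct scatterlist *src,
 *                                     unsigned int nbytes, const u8 *key,
 *                                     unsigned int keylen, const u8 *iv)
 *      {
 *              struct crypto_blkcipher *tfm;
 *              struct blkcipher_desc desc;
 *              int ret;
 *
 *              tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
 *              if (IS_ERR(tfm))
 *                      return PTR_ERR(tfm);
 *
 *              ret = crypto_blkcipher_setkey(tfm, key, keylen);
 *              if (!ret) {
 *                      crypto_blkcipher_set_iv(tfm, iv,
 *                                              crypto_blkcipher_ivsize(tfm));
 *                      desc.tfm = tfm;
 *                      desc.flags = 0;
 *                      ret = crypto_blkcipher_encrypt(&desc, dst, src,
 *                                                     nbytes);
 *              }
 *
 *              crypto_free_blkcipher(tfm);
 *              return ret;
 *      }
 */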

static inline struct crypto_blkcipher *__crypto_blkcipher_cast(
        struct crypto_tfm *tfm)
{
        return (struct crypto_blkcipher *)tfm;
}

static inline struct crypto_blkcipher *crypto_blkcipher_cast(
        struct crypto_tfm *tfm)
{
        BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_BLKCIPHER);
        return __crypto_blkcipher_cast(tfm);
}

/**
 * crypto_alloc_blkcipher() - allocate synchronous block cipher handle
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *            blkcipher cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Allocate a cipher handle for a block cipher. The returned struct
 * crypto_blkcipher is the cipher handle that is required for any subsequent
 * API invocation for that block cipher.
 *
 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
 *         of an error, PTR_ERR() returns the error code.
 */
static inline struct crypto_blkcipher *crypto_alloc_blkcipher(
        const char *alg_name, u32 type, u32 mask)
{
        type &= ~CRYPTO_ALG_TYPE_MASK;
        type |= CRYPTO_ALG_TYPE_BLKCIPHER;
        mask |= CRYPTO_ALG_TYPE_MASK;

        return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask));
}

static inline struct crypto_tfm *crypto_blkcipher_tfm(
        struct crypto_blkcipher *tfm)
{
        return &tfm->base;
}

/**
 * crypto_free_blkcipher() - zeroize and free the block cipher handle
 * @tfm: cipher handle to be freed
 */
static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm)
{
        crypto_free_tfm(crypto_blkcipher_tfm(tfm));
}

/**
 * crypto_has_blkcipher() - Search for the availability of a block cipher
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *            block cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Return: true when the block cipher is known to the kernel crypto API; false
 *         otherwise
 */
static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask)
{
        type &= ~CRYPTO_ALG_TYPE_MASK;
        type |= CRYPTO_ALG_TYPE_BLKCIPHER;
        mask |= CRYPTO_ALG_TYPE_MASK;

        return crypto_has_alg(alg_name, type, mask);
}

/**
 * crypto_blkcipher_name() - return the name / cra_name from the cipher handle
 * @tfm: cipher handle
 *
 * Return: The character string holding the name of the cipher
 */
static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm)
{
        return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm));
}

static inline struct blkcipher_tfm *crypto_blkcipher_crt(
        struct crypto_blkcipher *tfm)
{
        return &crypto_blkcipher_tfm(tfm)->crt_blkcipher;
}

static inline struct blkcipher_alg *crypto_blkcipher_alg(
        struct crypto_blkcipher *tfm)
{
        return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher;
}

/**
 * crypto_blkcipher_ivsize() - obtain IV size
 * @tfm: cipher handle
 *
 * The size of the IV for the block cipher referenced by the cipher handle is
 * returned. This IV size may be zero if the cipher does not need an IV.
 *
 * Return: IV size in bytes
 */
static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm)
{
        return crypto_blkcipher_alg(tfm)->ivsize;
}

/**
 * crypto_blkcipher_blocksize() - obtain block size of cipher
 * @tfm: cipher handle
 *
 * The block size for the block cipher referenced with the cipher handle is
 * returned. The caller may use that information to allocate appropriate
 * memory for the data returned by the encryption or decryption operation.
 *
 * Return: block size of cipher
 */
static inline unsigned int crypto_blkcipher_blocksize(
        struct crypto_blkcipher *tfm)
{
        return crypto_tfm_alg_blocksize(crypto_blkcipher_tfm(tfm));
}

static inline unsigned int crypto_blkcipher_alignmask(
        struct crypto_blkcipher *tfm)
{
        return crypto_tfm_alg_alignmask(crypto_blkcipher_tfm(tfm));
}

static inline u32 crypto_blkcipher_get_flags(struct crypto_blkcipher *tfm)
{
        return crypto_tfm_get_flags(crypto_blkcipher_tfm(tfm));
}

static inline void crypto_blkcipher_set_flags(struct crypto_blkcipher *tfm,
                                              u32 flags)
{
        crypto_tfm_set_flags(crypto_blkcipher_tfm(tfm), flags);
}

static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm,
                                                u32 flags)
{
        crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags);
}

/**
 * crypto_blkcipher_setkey() - set key for cipher
 * @tfm: cipher handle
 * @key: buffer holding the key
 * @keylen: length of the key in bytes
 *
 * The caller provided key is set for the block cipher referenced by the cipher
 * handle.
 *
 * Note, the key length determines the cipher type. Many block ciphers implement
 * different cipher modes depending on the key size, such as AES-128 vs AES-192
 * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
 * is performed.
 *
 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
                                          const u8 *key, unsigned int keylen)
{
        return crypto_blkcipher_crt(tfm)->setkey(crypto_blkcipher_tfm(tfm),
                                                 key, keylen);
}

/**
 * crypto_blkcipher_encrypt() - encrypt plaintext
 * @desc: reference to the block cipher handle with meta data
 * @dst: scatter/gather list that is filled by the cipher operation with the
 *      ciphertext
 * @src: scatter/gather list that holds the plaintext
 * @nbytes: number of bytes of the plaintext to encrypt.
 *
 * Encrypt plaintext data using the IV set by the caller with a preceding
 * call of crypto_blkcipher_set_iv.
 *
 * The blkcipher_desc data structure must be filled by the caller and can
 * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
 * with the block cipher handle; desc.flags is filled with either
 * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc,
                                           struct scatterlist *dst,
                                           struct scatterlist *src,
                                           unsigned int nbytes)
{
        desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
        return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
}

/**
 * crypto_blkcipher_encrypt_iv() - encrypt plaintext with dedicated IV
 * @desc: reference to the block cipher handle with meta data
 * @dst: scatter/gather list that is filled by the cipher operation with the
 *      ciphertext
 * @src: scatter/gather list that holds the plaintext
 * @nbytes: number of bytes of the plaintext to encrypt.
 *
 * Encrypt plaintext data with the use of an IV that is solely used for this
 * cipher operation. Any previously set IV is not used.
 *
 * The blkcipher_desc data structure must be filled by the caller and can
 * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
 * with the block cipher handle; desc.info is filled with the IV to be used for
 * the current operation; desc.flags is filled with either
 * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
                                              struct scatterlist *dst,
                                              struct scatterlist *src,
                                              unsigned int nbytes)
{
        return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
}

/**
 * crypto_blkcipher_decrypt() - decrypt ciphertext
 * @desc: reference to the block cipher handle with meta data
 * @dst: scatter/gather list that is filled by the cipher operation with the
 *      plaintext
 * @src: scatter/gather list that holds the ciphertext
 * @nbytes: number of bytes of the ciphertext to decrypt.
 *
 * Decrypt ciphertext data using the IV set by the caller with a preceding
 * call of crypto_blkcipher_set_iv.
 *
 * The blkcipher_desc data structure must be filled by the caller as documented
 * for the crypto_blkcipher_encrypt call above.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
                                           struct scatterlist *dst,
                                           struct scatterlist *src,
                                           unsigned int nbytes)
{
        desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
        return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
}

/**
 * crypto_blkcipher_decrypt_iv() - decrypt ciphertext with dedicated IV
 * @desc: reference to the block cipher handle with meta data
 * @dst: scatter/gather list that is filled by the cipher operation with the
 *      plaintext
 * @src: scatter/gather list that holds the ciphertext
 * @nbytes: number of bytes of the ciphertext to decrypt.
 *
 * Decrypt ciphertext data with the use of an IV that is solely used for this
 * cipher operation. Any previously set IV is not used.
 *
 * The blkcipher_desc data structure must be filled by the caller as documented
 * for the crypto_blkcipher_encrypt_iv call above.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
                                              struct scatterlist *dst,
                                              struct scatterlist *src,
                                              unsigned int nbytes)
{
        return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
}

/**
 * crypto_blkcipher_set_iv() - set IV for cipher
 * @tfm: cipher handle
 * @src: buffer holding the IV
 * @len: length of the IV in bytes
 *
 * The caller provided IV is set for the block cipher referenced by the cipher
 * handle.
 */
static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm,
                                           const u8 *src, unsigned int len)
{
        memcpy(crypto_blkcipher_crt(tfm)->iv, src, len);
}

1357/**
1358 * crypto_blkcipher_get_iv() - obtain IV from cipher
1359 * @tfm: cipher handle
1360 * @dst: buffer filled with the IV
1361 * @len: length of the buffer dst
1362 *
1363 * The caller can obtain the IV set for the block cipher referenced by the
1364 * cipher handle and store it into the user-provided buffer. If the buffer
1365 * is too small, the IV is truncated to fit the buffer.
1366 */
1367static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm,
1368                                           u8 *dst, unsigned int len)
1369{
1370        memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len);
1371}
1372
1373/**
1374 * DOC: Single Block Cipher API
1375 *
1376 * The single block cipher API is used with the ciphers of type
1377 * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto).
1378 *
1379 * Using the single block cipher API calls, operations with the basic cipher
1380 * primitive can be implemented. These cipher primitives exclude any block
1381 * chaining operations, including IV handling.
1382 *
1383 * The purpose of this single block cipher API is to support the implementation
1384 * of templates or other concepts that only need to perform the cipher operation
1385 * on one block at a time. Templates invoke the underlying cipher primitive
1386 * block-wise and process either the input or the output data of these cipher
1387 * operations.
1388 */
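
/*
 * An illustrative sketch of such block-wise use (not a real template;
 * tfm, buf and len are assumptions of this sketch, and len must be a
 * multiple of the block size):
 *
 *	unsigned int bs = crypto_cipher_blocksize(tfm);
 *	unsigned int i;
 *
 *	for (i = 0; i < len; i += bs)
 *		crypto_cipher_encrypt_one(tfm, buf + i, buf + i);
 *
 * This loop is effectively ECB; real templates combine the primitive
 * with chaining and IV handling.
 */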
1389
1390static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
1391{
1392        return (struct crypto_cipher *)tfm;
1393}
1394
1395static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm)
1396{
1397        BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
1398        return __crypto_cipher_cast(tfm);
1399}
1400
1401/**
1402 * crypto_alloc_cipher() - allocate single block cipher handle
1403 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1404 *           single block cipher
1405 * @type: specifies the type of the cipher
1406 * @mask: specifies the mask for the cipher
1407 *
1408 * Allocate a cipher handle for a single block cipher. The returned struct
1409 * crypto_cipher is the cipher handle that is required for any subsequent API
1410 * invocation for that single block cipher.
1411 *
1412 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
1413 *         of an error, PTR_ERR() returns the error code.
1414 */
1415static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
1416                                                        u32 type, u32 mask)
1417{
1418        type &= ~CRYPTO_ALG_TYPE_MASK;
1419        type |= CRYPTO_ALG_TYPE_CIPHER;
1420        mask |= CRYPTO_ALG_TYPE_MASK;
1421
1422        return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask));
1423}
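
/*
 * Example (illustrative sketch): allocating a handle for the generic
 * AES single block cipher and checking the result.  IS_ERR() and
 * PTR_ERR() come from <linux/err.h>; error handling is abbreviated.
 *
 *	struct crypto_cipher *tfm;
 *
 *	tfm = crypto_alloc_cipher("aes", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 */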
1424
1425static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
1426{
1427        return &tfm->base;
1428}
1429
1430/**
1431 * crypto_free_cipher() - zeroize and free the single block cipher handle
1432 * @tfm: cipher handle to be freed
1433 */
1434static inline void crypto_free_cipher(struct crypto_cipher *tfm)
1435{
1436        crypto_free_tfm(crypto_cipher_tfm(tfm));
1437}
1438
1439/**
1440 * crypto_has_cipher() - Search for the availability of a single block cipher
1441 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1442 *           single block cipher
1443 * @type: specifies the type of the cipher
1444 * @mask: specifies the mask for the cipher
1445 *
1446 * Return: true when the single block cipher is known to the kernel crypto API;
1447 *         false otherwise
1448 */
1449static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
1450{
1451        type &= ~CRYPTO_ALG_TYPE_MASK;
1452        type |= CRYPTO_ALG_TYPE_CIPHER;
1453        mask |= CRYPTO_ALG_TYPE_MASK;
1454
1455        return crypto_has_alg(alg_name, type, mask);
1456}
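
/*
 * Example (illustrative sketch): probing availability before deciding
 * which algorithm to request; "serpent" is merely an example name here.
 *
 *	const char *name = crypto_has_cipher("serpent", 0, 0) ?
 *			   "serpent" : "aes";
 */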
1457
1458static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm)
1459{
1460        return &crypto_cipher_tfm(tfm)->crt_cipher;
1461}
1462
1463/**
1464 * crypto_cipher_blocksize() - obtain block size for cipher
1465 * @tfm: cipher handle
1466 *
1467 * The block size for the single block cipher referenced by the cipher handle
1468 * tfm is returned. The caller may use that information to allocate appropriate
1469 * memory for the data returned by the encryption or decryption operation.
1470 *
1471 * Return: block size of cipher
1472 */
1473static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
1474{
1475        return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
1476}
1477
1478static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm)
1479{
1480        return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm));
1481}
1482
1483static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm)
1484{
1485        return crypto_tfm_get_flags(crypto_cipher_tfm(tfm));
1486}
1487
1488static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm,
1489                                           u32 flags)
1490{
1491        crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags);
1492}
1493
1494static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
1495                                             u32 flags)
1496{
1497        crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
1498}
1499
1500/**
1501 * crypto_cipher_setkey() - set key for cipher
1502 * @tfm: cipher handle
1503 * @key: buffer holding the key
1504 * @keylen: length of the key in bytes
1505 *
1506 * The caller provided key is set for the single block cipher referenced by the
1507 * cipher handle.
1508 *
1509 * Note: the key length determines the cipher variant. Many block ciphers come
1510 * in different variants depending on the key size, such as AES-128 vs. AES-192
1511 * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
1512 * is performed.
1513 *
1514 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
1515 */
1516static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
1517                                       const u8 *key, unsigned int keylen)
1518{
1519        return crypto_cipher_crt(tfm)->cit_setkey(crypto_cipher_tfm(tfm),
1520                                                  key, keylen);
1521}
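
/*
 * Example (illustrative sketch): a 16 byte key on an "aes" handle
 * selects AES-128.  The all-zero key below is a placeholder, not a
 * usable key.
 *
 *	static const u8 key[16];
 *	int err;
 *
 *	err = crypto_cipher_setkey(tfm, key, sizeof(key));
 *	if (err)
 *		pr_err("setkey failed: %d\n", err);
 */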
1522
1523/**
1524 * crypto_cipher_encrypt_one() - encrypt one block of plaintext
1525 * @tfm: cipher handle
1526 * @dst: points to the buffer that will be filled with the ciphertext
1527 * @src: buffer holding the plaintext to be encrypted
1528 *
1529 * Invoke the encryption operation of one block. The caller must ensure that
1530 * the plaintext and ciphertext buffers are at least one block in size.
1531 */
1532static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
1533                                             u8 *dst, const u8 *src)
1534{
1535        crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm),
1536                                                dst, src);
1537}
1538
1539/**
1540 * crypto_cipher_decrypt_one() - decrypt one block of ciphertext
1541 * @tfm: cipher handle
1542 * @dst: points to the buffer that will be filled with the plaintext
1543 * @src: buffer holding the ciphertext to be decrypted
1544 *
1545 * Invoke the decryption operation of one block. The caller must ensure that
1546 * the plaintext and ciphertext buffers are at least one block in size.
1547 */
1548static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
1549                                             u8 *dst, const u8 *src)
1550{
1551        crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm),
1552                                                dst, src);
1553}
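
/*
 * Example (illustrative sketch): a single block round trip on a keyed
 * AES handle; the 16 byte buffers match the AES block size.
 *
 *	u8 pt[16] = { 0 }, ct[16], out[16];
 *
 *	crypto_cipher_encrypt_one(tfm, ct, pt);
 *	crypto_cipher_decrypt_one(tfm, out, ct);
 *	WARN_ON(memcmp(pt, out, sizeof(pt)));
 */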
1554
1555static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
1556{
1557        return (struct crypto_comp *)tfm;
1558}
1559
1560static inline struct crypto_comp *crypto_comp_cast(struct crypto_tfm *tfm)
1561{
1562        BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_COMPRESS) &
1563               CRYPTO_ALG_TYPE_MASK);
1564        return __crypto_comp_cast(tfm);
1565}
1566
1567static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name,
1568                                                    u32 type, u32 mask)
1569{
1570        type &= ~CRYPTO_ALG_TYPE_MASK;
1571        type |= CRYPTO_ALG_TYPE_COMPRESS;
1572        mask |= CRYPTO_ALG_TYPE_MASK;
1573
1574        return __crypto_comp_cast(crypto_alloc_base(alg_name, type, mask));
1575}
1576
1577static inline struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm)
1578{
1579        return &tfm->base;
1580}
1581
1582static inline void crypto_free_comp(struct crypto_comp *tfm)
1583{
1584        crypto_free_tfm(crypto_comp_tfm(tfm));
1585}
1586
1587static inline int crypto_has_comp(const char *alg_name, u32 type, u32 mask)
1588{
1589        type &= ~CRYPTO_ALG_TYPE_MASK;
1590        type |= CRYPTO_ALG_TYPE_COMPRESS;
1591        mask |= CRYPTO_ALG_TYPE_MASK;
1592
1593        return crypto_has_alg(alg_name, type, mask);
1594}
1595
1596static inline const char *crypto_comp_name(struct crypto_comp *tfm)
1597{
1598        return crypto_tfm_alg_name(crypto_comp_tfm(tfm));
1599}
1600
1601static inline struct compress_tfm *crypto_comp_crt(struct crypto_comp *tfm)
1602{
1603        return &crypto_comp_tfm(tfm)->crt_compress;
1604}
1605
1606static inline int crypto_comp_compress(struct crypto_comp *tfm,
1607                                       const u8 *src, unsigned int slen,
1608                                       u8 *dst, unsigned int *dlen)
1609{
1610        return crypto_comp_crt(tfm)->cot_compress(crypto_comp_tfm(tfm),
1611                                                  src, slen, dst, dlen);
1612}
1613
1614static inline int crypto_comp_decompress(struct crypto_comp *tfm,
1615                                         const u8 *src, unsigned int slen,
1616                                         u8 *dst, unsigned int *dlen)
1617{
1618        return crypto_comp_crt(tfm)->cot_decompress(crypto_comp_tfm(tfm),
1619                                                    src, slen, dst, dlen);
1620}
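
/*
 * Example (illustrative sketch): a compress/decompress round trip with
 * the "deflate" algorithm.  The buffers src, dst and out and their
 * sizes are assumptions of this sketch; dlen and olen carry the
 * destination sizes on input and the produced lengths on output.
 *
 *	struct crypto_comp *tfm;
 *	unsigned int dlen = sizeof(dst), olen = sizeof(out);
 *	int err;
 *
 *	tfm = crypto_alloc_comp("deflate", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_comp_compress(tfm, src, slen, dst, &dlen);
 *	if (!err)
 *		err = crypto_comp_decompress(tfm, dst, dlen, out, &olen);
 *	crypto_free_comp(tfm);
 */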
1621
1622#endif  /* _LINUX_CRYPTO_H */