/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Scatterlist Cryptographic API.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2002 David S. Miller (davem@redhat.com)
 * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
 * and Nettle, by Niels Möller.
 */
#ifndef _LINUX_CRYPTO_H
#define _LINUX_CRYPTO_H

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/completion.h>

/*
 * Autoloaded crypto modules should only use a prefixed name to avoid allowing
 * arbitrary modules to be loaded. Loading from userspace may still need the
 * unprefixed names, so those aliases are retained as well.
 * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
 * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
 * expands twice on the same line. Instead, use a separate base name for the
 * alias.
 */
#define MODULE_ALIAS_CRYPTO(name)	\
		__MODULE_INFO(alias, alias_userspace, name);	\
		__MODULE_INFO(alias, alias_crypto, "crypto-" name)
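/*
 * For instance, a module implementing "aes" would typically declare (an
 * illustrative sketch, not part of this header):
 *
 *	MODULE_ALIAS_CRYPTO("aes");
 *
 * which emits both the unprefixed "aes" alias for userspace module loading
 * and the "crypto-aes" alias used by the crypto API's module autoloading.
 */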
/*
 * Algorithm masks and types.
 */
#define CRYPTO_ALG_TYPE_MASK		0x0000000f
#define CRYPTO_ALG_TYPE_CIPHER		0x00000001
#define CRYPTO_ALG_TYPE_COMPRESS	0x00000002
#define CRYPTO_ALG_TYPE_AEAD		0x00000003
#define CRYPTO_ALG_TYPE_BLKCIPHER	0x00000004
#define CRYPTO_ALG_TYPE_ABLKCIPHER	0x00000005
#define CRYPTO_ALG_TYPE_SKCIPHER	0x00000005
#define CRYPTO_ALG_TYPE_KPP		0x00000008
#define CRYPTO_ALG_TYPE_ACOMPRESS	0x0000000a
#define CRYPTO_ALG_TYPE_SCOMPRESS	0x0000000b
#define CRYPTO_ALG_TYPE_RNG		0x0000000c
#define CRYPTO_ALG_TYPE_AKCIPHER	0x0000000d
#define CRYPTO_ALG_TYPE_HASH		0x0000000e
#define CRYPTO_ALG_TYPE_SHASH		0x0000000e
#define CRYPTO_ALG_TYPE_AHASH		0x0000000f

#define CRYPTO_ALG_TYPE_HASH_MASK	0x0000000e
#define CRYPTO_ALG_TYPE_AHASH_MASK	0x0000000e
#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK	0x0000000c
#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK	0x0000000e

#define CRYPTO_ALG_LARVAL		0x00000010
#define CRYPTO_ALG_DEAD			0x00000020
#define CRYPTO_ALG_DYING		0x00000040
#define CRYPTO_ALG_ASYNC		0x00000080

/*
 * Set this bit if and only if the algorithm requires another algorithm of
 * the same type to handle corner cases.
 */
#define CRYPTO_ALG_NEED_FALLBACK	0x00000100

/*
 * Set if the algorithm has passed automated run-time testing. Note that
 * if there is no run-time testing for a given algorithm it is considered
 * to have passed.
 */
#define CRYPTO_ALG_TESTED		0x00000400

/*
 * Set if the algorithm is an instance that is built from templates.
 */
#define CRYPTO_ALG_INSTANCE		0x00000800

/* Set this bit if the algorithm provided is hardware accelerated but
 * not available to userspace via an instruction set or similar.
 */
#define CRYPTO_ALG_KERN_DRIVER_ONLY	0x00001000

/*
 * Mark a cipher as a service implementation only usable by another
 * cipher and never by a normal user of the kernel crypto API.
 */
#define CRYPTO_ALG_INTERNAL		0x00002000

/*
 * Set if the algorithm has a ->setkey() method but can be used without
 * calling it first, i.e. there is a default key.
 */
#define CRYPTO_ALG_OPTIONAL_KEY	0x00004000

/*
 * Don't trigger module loading
 */
#define CRYPTO_NOLOAD			0x00008000

/*
 * Transform masks and values (for crt_flags).
 */
#define CRYPTO_TFM_NEED_KEY		0x00000001

#define CRYPTO_TFM_REQ_MASK		0x000fff00
#define CRYPTO_TFM_RES_MASK		0xfff00000

#define CRYPTO_TFM_REQ_FORBID_WEAK_KEYS	0x00000100
#define CRYPTO_TFM_REQ_MAY_SLEEP	0x00000200
#define CRYPTO_TFM_REQ_MAY_BACKLOG	0x00000400
#define CRYPTO_TFM_RES_WEAK_KEY		0x00100000
#define CRYPTO_TFM_RES_BAD_KEY_LEN	0x00200000
#define CRYPTO_TFM_RES_BAD_KEY_SCHED	0x00400000
#define CRYPTO_TFM_RES_BAD_BLOCK_LEN	0x00800000
#define CRYPTO_TFM_RES_BAD_FLAGS	0x01000000

/*
 * Miscellaneous stuff.
 */
#define CRYPTO_MAX_ALG_NAME		128

/*
 * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
 * declaration) is used to ensure that the crypto_tfm context structure is
 * aligned correctly for the given architecture so that there are no alignment
 * faults for C data types. In particular, this is required on platforms such
 * as arm where pointers are 32-bit aligned but there are data types such as
 * u64 which require 64-bit alignment.
 */
#define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN

#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))
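/*
 * Illustrative sketch (hypothetical driver context, not part of this
 * header): because tfm contexts are placed at a CRYPTO_MINALIGN boundary,
 * a context such as
 *
 *	struct mydrv_ctx {
 *		u64 counter;	(requires 64-bit alignment on some arches)
 *		u8 key[32];
 *	};
 *
 * may be used straight out of crypto_tfm_ctx() without alignment faults,
 * even on 32-bit platforms such as arm.
 */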
struct scatterlist;
struct crypto_ablkcipher;
struct crypto_async_request;
struct crypto_blkcipher;
struct crypto_tfm;
struct crypto_type;

typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);

/**
 * DOC: Block Cipher Context Data Structures
 *
 * These data structures define the operating context for each block cipher
 * type.
 */

struct crypto_async_request {
	struct list_head list;
	crypto_completion_t complete;
	void *data;
	struct crypto_tfm *tfm;

	u32 flags;
};

struct ablkcipher_request {
	struct crypto_async_request base;

	unsigned int nbytes;

	void *info;

	struct scatterlist *src;
	struct scatterlist *dst;

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

struct blkcipher_desc {
	struct crypto_blkcipher *tfm;
	void *info;
	u32 flags;
};

/**
 * DOC: Block Cipher Algorithm Definitions
 *
 * These data structures define modular crypto algorithm implementations,
 * managed via crypto_register_alg() and crypto_unregister_alg().
 */

/**
 * struct ablkcipher_alg - asynchronous block cipher definition
 * @min_keysize: Minimum key size supported by the transformation. This is the
 *	smallest key length supported by this transformation algorithm.
 *	This must be set to one of the pre-defined values as this is
 *	not hardware specific. Possible values for this field can be
 *	found via git grep "_MIN_KEY_SIZE" include/crypto/
 * @max_keysize: Maximum key size supported by the transformation. This is the
 *	largest key length supported by this transformation algorithm.
 *	This must be set to one of the pre-defined values as this is
 *	not hardware specific. Possible values for this field can be
 *	found via git grep "_MAX_KEY_SIZE" include/crypto/
 * @setkey: Set key for the transformation. This function is used to either
 *	program a supplied key into the hardware or store the key in the
 *	transformation context for programming it later. Note that this
 *	function does modify the transformation context. This function can
 *	be called multiple times during the existence of the transformation
 *	object, so one must make sure the key is properly reprogrammed into
 *	the hardware. This function is also responsible for checking the key
 *	length for validity. In case a software fallback was put in place in
 *	the @cra_init call, this function might need to use the fallback if
 *	the algorithm doesn't support all of the key sizes.
 * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt
 *	the supplied scatterlist containing the blocks of data. The crypto
 *	API consumer is responsible for aligning the entries of the
 *	scatterlist properly and making sure the chunks are correctly
 *	sized. In case a software fallback was put in place in the
 *	@cra_init call, this function might need to use the fallback if
 *	the algorithm doesn't support all of the key sizes. In case the
 *	key was stored in transformation context, the key might need to be
 *	re-programmed into the hardware in this function. This function
 *	shall not modify the transformation context, as this function may
 *	be called in parallel with the same transformation object.
 * @decrypt: Decrypt a scatterlist of blocks. This is a reverse counterpart to
 *	@encrypt and the conditions are exactly the same.
 * @ivsize: IV size applicable for transformation. The consumer must provide an
 *	IV of exactly that size to perform the encrypt or decrypt operation.
 *
 * All fields except @ivsize are mandatory and must be filled.
 */
struct ablkcipher_alg {
	int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
		      unsigned int keylen);
	int (*encrypt)(struct ablkcipher_request *req);
	int (*decrypt)(struct ablkcipher_request *req);

	unsigned int min_keysize;
	unsigned int max_keysize;
	unsigned int ivsize;
};
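/*
 * A minimal sketch of how a driver might fill this in (the mydrv_* names
 * are hypothetical; the AES_* constants come from include/crypto/aes.h):
 *
 *	static struct ablkcipher_alg mydrv_cbc_alg = {
 *		.min_keysize	= AES_MIN_KEY_SIZE,
 *		.max_keysize	= AES_MAX_KEY_SIZE,
 *		.ivsize		= AES_BLOCK_SIZE,
 *		.setkey		= mydrv_setkey,
 *		.encrypt	= mydrv_encrypt,
 *		.decrypt	= mydrv_decrypt,
 *	};
 *
 * The structure is then embedded in a struct crypto_alg (see below) as the
 * @cra_u.ablkcipher union member.
 */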
/**
 * struct blkcipher_alg - synchronous block cipher definition
 * @min_keysize: see struct ablkcipher_alg
 * @max_keysize: see struct ablkcipher_alg
 * @setkey: see struct ablkcipher_alg
 * @setkeytype: Set the key type for the transformation. This is an optional,
 *	implementation-specific callback; most ciphers do not provide it.
 * @encrypt: see struct ablkcipher_alg
 * @decrypt: see struct ablkcipher_alg
 * @ivsize: see struct ablkcipher_alg
 *
 * All fields except @ivsize and @setkeytype are mandatory and must be filled.
 */
struct blkcipher_alg {
	int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
		      unsigned int keylen);
	int (*setkeytype)(struct crypto_tfm *tfm, const u8 *keytype,
			  unsigned int keylen);
	int (*encrypt)(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes);
	int (*decrypt)(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes);

	unsigned int min_keysize;
	unsigned int max_keysize;
	unsigned int ivsize;
};

/**
 * struct cipher_alg - single-block symmetric ciphers definition
 * @cia_min_keysize: Minimum key size supported by the transformation. This is
 *	the smallest key length supported by this transformation
 *	algorithm. This must be set to one of the pre-defined
 *	values as this is not hardware specific. Possible values
 *	for this field can be found via git grep "_MIN_KEY_SIZE"
 *	include/crypto/
 * @cia_max_keysize: Maximum key size supported by the transformation. This is
 *	the largest key length supported by this transformation
 *	algorithm. This must be set to one of the pre-defined values
 *	as this is not hardware specific. Possible values for this
 *	field can be found via git grep "_MAX_KEY_SIZE"
 *	include/crypto/
 * @cia_setkey: Set key for the transformation. This function is used to either
 *	program a supplied key into the hardware or store the key in the
 *	transformation context for programming it later. Note that this
 *	function does modify the transformation context. This function
 *	can be called multiple times during the existence of the
 *	transformation object, so one must make sure the key is properly
 *	reprogrammed into the hardware. This function is also
 *	responsible for checking the key length for validity.
 * @cia_encrypt: Encrypt a single block. This function is used to encrypt a
 *	single block of data, which must be @cra_blocksize big. This
 *	always operates on a full @cra_blocksize and it is not possible
 *	to encrypt a block of smaller size. The supplied buffers must
 *	therefore also be at least of @cra_blocksize size. Both the
 *	input and output buffers are always aligned to @cra_alignmask.
 *	In case either of the input or output buffer supplied by user
 *	of the crypto API is not aligned to @cra_alignmask, the crypto
 *	API will re-align the buffers. The re-alignment means that a
 *	new buffer will be allocated, the data will be copied into the
 *	new buffer, then the processing will happen on the new buffer,
 *	then the data will be copied back into the original buffer and
 *	finally the new buffer will be freed. In case a software
 *	fallback was put in place in the @cra_init call, this function
 *	might need to use the fallback if the algorithm doesn't support
 *	all of the key sizes. In case the key was stored in
 *	transformation context, the key might need to be re-programmed
 *	into the hardware in this function. This function shall not
 *	modify the transformation context, as this function may be
 *	called in parallel with the same transformation object.
 * @cia_decrypt: Decrypt a single block. This is a reverse counterpart to
 *	@cia_encrypt, and the conditions are exactly the same.
 *
 * All fields are mandatory and must be filled.
 */
struct cipher_alg {
	unsigned int cia_min_keysize;
	unsigned int cia_max_keysize;
	int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
			  unsigned int keylen);
	void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
	void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
};
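/*
 * A minimal sketch for a single-block cipher (hypothetical mydrv_* names;
 * AES_* constants from include/crypto/aes.h):
 *
 *	static struct cipher_alg mydrv_aes_alg = {
 *		.cia_min_keysize	= AES_MIN_KEY_SIZE,
 *		.cia_max_keysize	= AES_MAX_KEY_SIZE,
 *		.cia_setkey		= mydrv_aes_setkey,
 *		.cia_encrypt		= mydrv_aes_encrypt_one,
 *		.cia_decrypt		= mydrv_aes_decrypt_one,
 *	};
 *
 * @cia_encrypt/@cia_decrypt process exactly one @cra_blocksize block per
 * call and return no error; all validation happens in @cia_setkey.
 */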
/**
 * struct compress_alg - compression/decompression algorithm
 * @coa_compress: Compress a buffer of specified length, storing the resulting
 *	data in the specified buffer. Return the length of the
 *	compressed data in dlen.
 * @coa_decompress: Decompress the source buffer, storing the uncompressed
 *	data in the specified buffer. The length of the data is
 *	returned in dlen.
 *
 * All fields are mandatory.
 */
struct compress_alg {
	int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src,
			    unsigned int slen, u8 *dst, unsigned int *dlen);
	int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src,
			      unsigned int slen, u8 *dst, unsigned int *dlen);
};
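/*
 * A minimal sketch of the expected callback behaviour (hypothetical mydrv_*
 * names, using memcpy as a stand-in "null" compressor): on entry *dlen holds
 * the capacity of @dst, on success it is updated to the number of bytes
 * actually produced.
 *
 *	static int mydrv_compress(struct crypto_tfm *tfm, const u8 *src,
 *				  unsigned int slen, u8 *dst,
 *				  unsigned int *dlen)
 *	{
 *		if (slen > *dlen)
 *			return -EINVAL;
 *		memcpy(dst, src, slen);
 *		*dlen = slen;
 *		return 0;
 *	}
 */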
#ifdef CONFIG_CRYPTO_STATS
/*
 * struct crypto_istat_aead - statistics for AEAD algorithm
 * @encrypt_cnt: number of encrypt requests
 * @encrypt_tlen: total data size handled by encrypt requests
 * @decrypt_cnt: number of decrypt requests
 * @decrypt_tlen: total data size handled by decrypt requests
 * @err_cnt: number of errors for AEAD requests
 */
struct crypto_istat_aead {
	atomic64_t encrypt_cnt;
	atomic64_t encrypt_tlen;
	atomic64_t decrypt_cnt;
	atomic64_t decrypt_tlen;
	atomic64_t err_cnt;
};

/*
 * struct crypto_istat_akcipher - statistics for akcipher algorithm
 * @encrypt_cnt: number of encrypt requests
 * @encrypt_tlen: total data size handled by encrypt requests
 * @decrypt_cnt: number of decrypt requests
 * @decrypt_tlen: total data size handled by decrypt requests
 * @verify_cnt: number of verify operations
 * @sign_cnt: number of sign requests
 * @err_cnt: number of errors for akcipher requests
 */
struct crypto_istat_akcipher {
	atomic64_t encrypt_cnt;
	atomic64_t encrypt_tlen;
	atomic64_t decrypt_cnt;
	atomic64_t decrypt_tlen;
	atomic64_t verify_cnt;
	atomic64_t sign_cnt;
	atomic64_t err_cnt;
};

/*
 * struct crypto_istat_cipher - statistics for cipher algorithm
 * @encrypt_cnt: number of encrypt requests
 * @encrypt_tlen: total data size handled by encrypt requests
 * @decrypt_cnt: number of decrypt requests
 * @decrypt_tlen: total data size handled by decrypt requests
 * @err_cnt: number of errors for cipher requests
 */
struct crypto_istat_cipher {
	atomic64_t encrypt_cnt;
	atomic64_t encrypt_tlen;
	atomic64_t decrypt_cnt;
	atomic64_t decrypt_tlen;
	atomic64_t err_cnt;
};

/*
 * struct crypto_istat_compress - statistics for compress algorithm
 * @compress_cnt: number of compress requests
 * @compress_tlen: total data size handled by compress requests
 * @decompress_cnt: number of decompress requests
 * @decompress_tlen: total data size handled by decompress requests
 * @err_cnt: number of errors for compress requests
 */
struct crypto_istat_compress {
	atomic64_t compress_cnt;
	atomic64_t compress_tlen;
	atomic64_t decompress_cnt;
	atomic64_t decompress_tlen;
	atomic64_t err_cnt;
};

/*
 * struct crypto_istat_hash - statistics for hash algorithm
 * @hash_cnt: number of hash requests
 * @hash_tlen: total data size hashed
 * @err_cnt: number of errors for hash requests
 */
struct crypto_istat_hash {
	atomic64_t hash_cnt;
	atomic64_t hash_tlen;
	atomic64_t err_cnt;
};

/*
 * struct crypto_istat_kpp - statistics for KPP algorithm
 * @setsecret_cnt: number of setsecret operations
 * @generate_public_key_cnt: number of generate_public_key operations
 * @compute_shared_secret_cnt: number of compute_shared_secret operations
 * @err_cnt: number of errors for KPP requests
 */
struct crypto_istat_kpp {
	atomic64_t setsecret_cnt;
	atomic64_t generate_public_key_cnt;
	atomic64_t compute_shared_secret_cnt;
	atomic64_t err_cnt;
};

/*
 * struct crypto_istat_rng - statistics for RNG algorithm
 * @generate_cnt: number of RNG generate requests
 * @generate_tlen: total data size of generated data by the RNG
 * @seed_cnt: number of times the RNG was seeded
 * @err_cnt: number of errors for RNG requests
 */
struct crypto_istat_rng {
	atomic64_t generate_cnt;
	atomic64_t generate_tlen;
	atomic64_t seed_cnt;
	atomic64_t err_cnt;
};
#endif /* CONFIG_CRYPTO_STATS */

#define cra_ablkcipher	cra_u.ablkcipher
#define cra_blkcipher	cra_u.blkcipher
#define cra_cipher	cra_u.cipher
#define cra_compress	cra_u.compress
/**
 * struct crypto_alg - definition of a cryptographic cipher algorithm
 * @cra_flags: Flags describing this transformation. See include/linux/crypto.h
 *	CRYPTO_ALG_* flags for the flags which go in here. Those are
 *	used for fine-tuning the description of the transformation
 *	algorithm.
 * @cra_blocksize: Minimum block size of this transformation. The size in bytes
 *	of the smallest possible unit which can be transformed with
 *	this algorithm. The users must respect this value.
 *	In case of HASH transformation, it is possible for a smaller
 *	block than @cra_blocksize to be passed to the crypto API for
 *	transformation, in case of any other transformation type, an
 *	error will be returned upon any attempt to transform smaller
 *	than @cra_blocksize chunks.
 * @cra_ctxsize: Size of the operational context of the transformation. This
 *	value informs the kernel crypto API about the memory size
 *	needed to be allocated for the transformation context.
 * @cra_alignmask: Alignment mask for the input and output data buffer. The
 *	data buffer containing the input data for the algorithm must be
 *	aligned to this alignment mask. The data buffer for the
 *	output data must be aligned to this alignment mask. Note that
 *	the Crypto API will do the re-alignment in software, but
 *	only under special conditions and there is a performance hit.
 *	The re-alignment happens at these occasions for different
 *	@cra_u types: cipher -- For both input data and output data
 *	buffer; ahash -- For output hash destination buf; shash --
 *	For output hash destination buf.
 *	This is needed on hardware which is flawed by design and
 *	cannot pick data from arbitrary addresses.
 * @cra_priority: Priority of this transformation implementation. In case
 *	multiple transformations with same @cra_name are available to
 *	the Crypto API, the kernel will use the one with highest
 *	@cra_priority.
 * @cra_name: Generic name (usable by multiple implementations) of the
 *	transformation algorithm. This is the name of the transformation
 *	itself. This field is used by the kernel when looking up the
 *	providers of particular transformation.
 * @cra_driver_name: Unique name of the transformation provider. This is the
 *	name of the provider of the transformation. This can be any
 *	arbitrary value, but in the usual case, this contains the
 *	name of the chip or provider and the name of the
 *	transformation algorithm.
 * @cra_type: Type of the cryptographic transformation. This is a pointer to
 *	struct crypto_type, which implements callbacks common for all
 *	transformation types. There are multiple options:
 *	&crypto_blkcipher_type, &crypto_ablkcipher_type,
 *	&crypto_ahash_type, &crypto_rng_type.
 *	This field might be empty. In that case, there are no common
 *	callbacks. This is the case for: cipher, compress, shash.
 * @cra_u: Callbacks implementing the transformation. This is a union of
 *	multiple structures. Depending on the type of transformation selected
 *	by @cra_type and @cra_flags above, the associated structure must be
 *	filled with callbacks. This field might be empty. This is the case
 *	for ahash, shash.
 * @cra_init: Initialize the cryptographic transformation object. This function
 *	is used to initialize the cryptographic transformation object.
 *	This function is called only once at the instantiation time, right
 *	after the transformation context was allocated. In case the
 *	cryptographic hardware has some special requirements which need to
 *	be handled by software, this function shall check for the precise
 *	requirement of the transformation and put any software fallbacks
 *	in place.
 * @cra_exit: Deinitialize the cryptographic transformation object. This is a
 *	counterpart to @cra_init, used to remove various changes set in
 *	@cra_init.
 * @cra_u.ablkcipher: Union member which contains an asynchronous block cipher
 *	definition. See @struct @ablkcipher_alg.
 * @cra_u.blkcipher: Union member which contains a synchronous block cipher
 *	definition. See @struct @blkcipher_alg.
 * @cra_u.cipher: Union member which contains a single-block symmetric cipher
 *	definition. See @struct @cipher_alg.
 * @cra_u.compress: Union member which contains a (de)compression algorithm.
 *	See @struct @compress_alg.
 * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE.
 * @cra_list: internally used
 * @cra_users: internally used
 * @cra_refcnt: internally used
 * @cra_destroy: internally used
 *
 * @stats: union of all possible crypto_istat_xxx structures
 * @stats.aead: statistics for AEAD algorithm
 * @stats.akcipher: statistics for akcipher algorithm
 * @stats.cipher: statistics for cipher algorithm
 * @stats.compress: statistics for compress algorithm
 * @stats.hash: statistics for hash algorithm
 * @stats.rng: statistics for rng algorithm
 * @stats.kpp: statistics for KPP algorithm
 *
 * The struct crypto_alg describes a generic Crypto API algorithm and is common
 * for all of the transformations. Any variable not documented here shall not
 * be used by a cipher implementation as it is internal to the Crypto API.
 */
struct crypto_alg {
	struct list_head cra_list;
	struct list_head cra_users;

	u32 cra_flags;
	unsigned int cra_blocksize;
	unsigned int cra_ctxsize;
	unsigned int cra_alignmask;

	int cra_priority;
	refcount_t cra_refcnt;

	char cra_name[CRYPTO_MAX_ALG_NAME];
	char cra_driver_name[CRYPTO_MAX_ALG_NAME];

	const struct crypto_type *cra_type;

	union {
		struct ablkcipher_alg ablkcipher;
		struct blkcipher_alg blkcipher;
		struct cipher_alg cipher;
		struct compress_alg compress;
	} cra_u;

	int (*cra_init)(struct crypto_tfm *tfm);
	void (*cra_exit)(struct crypto_tfm *tfm);
	void (*cra_destroy)(struct crypto_alg *alg);

	struct module *cra_module;

#ifdef CONFIG_CRYPTO_STATS
	union {
		struct crypto_istat_aead aead;
		struct crypto_istat_akcipher akcipher;
		struct crypto_istat_cipher cipher;
		struct crypto_istat_compress compress;
		struct crypto_istat_hash hash;
		struct crypto_istat_rng rng;
		struct crypto_istat_kpp kpp;
	} stats;
#endif /* CONFIG_CRYPTO_STATS */

} CRYPTO_MINALIGN_ATTR;
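/*
 * A minimal sketch of a complete algorithm declaration (hypothetical
 * mydrv_* names; real values depend on the implementation):
 *
 *	static struct crypto_alg mydrv_alg = {
 *		.cra_name		= "cbc(aes)",
 *		.cra_driver_name	= "cbc-aes-mydrv",
 *		.cra_priority		= 300,
 *		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
 *					  CRYPTO_ALG_ASYNC,
 *		.cra_blocksize		= AES_BLOCK_SIZE,
 *		.cra_ctxsize		= sizeof(struct mydrv_ctx),
 *		.cra_alignmask		= 0,
 *		.cra_type		= &crypto_ablkcipher_type,
 *		.cra_module		= THIS_MODULE,
 *		.cra_init		= mydrv_cra_init,
 *		.cra_exit		= mydrv_cra_exit,
 *		.cra_u.ablkcipher	= {
 *			.min_keysize	= AES_MIN_KEY_SIZE,
 *			.max_keysize	= AES_MAX_KEY_SIZE,
 *			.ivsize		= AES_BLOCK_SIZE,
 *			.setkey		= mydrv_setkey,
 *			.encrypt	= mydrv_encrypt,
 *			.decrypt	= mydrv_decrypt,
 *		},
 *	};
 */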
#ifdef CONFIG_CRYPTO_STATS
void crypto_stats_init(struct crypto_alg *alg);
void crypto_stats_get(struct crypto_alg *alg);
void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret, struct crypto_alg *alg);
void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret, struct crypto_alg *alg);
void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret);
void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret);
void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg);
void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg);
void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg);
void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, struct crypto_alg *alg);
void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg);
void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg);
void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg);
void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg);
void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret);
void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret);
void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret);
void crypto_stats_rng_seed(struct crypto_alg *alg, int ret);
void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret);
void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg);
void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg);
#else
static inline void crypto_stats_init(struct crypto_alg *alg)
{}
static inline void crypto_stats_get(struct crypto_alg *alg)
{}
static inline void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret, struct crypto_alg *alg)
{}
static inline void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret, struct crypto_alg *alg)
{}
static inline void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret)
{}
static inline void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret)
{}
static inline void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg)
{}
static inline void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg)
{}
static inline void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg)
{}
static inline void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, struct crypto_alg *alg)
{}
static inline void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg)
{}
static inline void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg)
{}
static inline void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg)
{}
static inline void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg)
{}
static inline void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret)
{}
static inline void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret)
{}
static inline void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret)
{}
static inline void crypto_stats_rng_seed(struct crypto_alg *alg, int ret)
{}
static inline void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret)
{}
static inline void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg)
{}
static inline void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg)
{}
#endif
/*
 * A helper struct for waiting for completion of async crypto ops
 */
struct crypto_wait {
	struct completion completion;
	int err;
};

/*
 * Macro for declaring a crypto op async wait object on stack
 */
#define DECLARE_CRYPTO_WAIT(_wait) \
	struct crypto_wait _wait = { \
		COMPLETION_INITIALIZER_ONSTACK((_wait).completion), 0 }

/*
 * Async ops completion helper functions
 */
void crypto_req_done(struct crypto_async_request *req, int err);

static inline int crypto_wait_req(int err, struct crypto_wait *wait)
{
	switch (err) {
	case -EINPROGRESS:
	case -EBUSY:
		wait_for_completion(&wait->completion);
		reinit_completion(&wait->completion);
		err = wait->err;
		break;
	}

	return err;
}

static inline void crypto_init_wait(struct crypto_wait *wait)
{
	init_completion(&wait->completion);
}
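/*
 * Typical usage to drive an asynchronous operation synchronously (a sketch;
 * "req" stands for any crypto request type that takes a completion callback):
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *					CRYPTO_TFM_REQ_MAY_SLEEP,
 *					crypto_req_done, &wait);
 *	err = crypto_wait_req(crypto_ablkcipher_encrypt(req), &wait);
 *
 * crypto_req_done() records the final status and completes the wait, while
 * crypto_wait_req() turns -EINPROGRESS/-EBUSY into a sleep until then.
 */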
/*
 * Algorithm registration interface.
 */
int crypto_register_alg(struct crypto_alg *alg);
int crypto_unregister_alg(struct crypto_alg *alg);
int crypto_register_algs(struct crypto_alg *algs, int count);
int crypto_unregister_algs(struct crypto_alg *algs, int count);
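/*
 * A sketch of module init/exit using the registration interface
 * (hypothetical mydrv_alg from the sketch above):
 *
 *	static int __init mydrv_init(void)
 *	{
 *		return crypto_register_alg(&mydrv_alg);
 *	}
 *
 *	static void __exit mydrv_exit(void)
 *	{
 *		crypto_unregister_alg(&mydrv_alg);
 *	}
 */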
/*
 * Algorithm query interface.
 */
int crypto_has_alg(const char *name, u32 type, u32 mask);

/*
 * Transforms: user-instantiated objects which encapsulate algorithms
 * and core processing logic. Managed via crypto_alloc_*() and
 * crypto_free_*(), as well as the various helpers below.
 */

struct ablkcipher_tfm {
	int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
		      unsigned int keylen);
	int (*encrypt)(struct ablkcipher_request *req);
	int (*decrypt)(struct ablkcipher_request *req);

	struct crypto_ablkcipher *base;

	unsigned int ivsize;
	unsigned int reqsize;
};

struct blkcipher_tfm {
	void *iv;
	int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
		      unsigned int keylen);
	int (*setkeytype)(struct crypto_tfm *tfm, const u8 *key,
			  unsigned int keylen);
	int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes);
	int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes);
};

struct cipher_tfm {
	int (*cit_setkey)(struct crypto_tfm *tfm,
			  const u8 *key, unsigned int keylen);
	void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
	void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
};

struct compress_tfm {
	int (*cot_compress)(struct crypto_tfm *tfm,
			    const u8 *src, unsigned int slen,
			    u8 *dst, unsigned int *dlen);
	int (*cot_decompress)(struct crypto_tfm *tfm,
			      const u8 *src, unsigned int slen,
			      u8 *dst, unsigned int *dlen);
};

#define crt_ablkcipher	crt_u.ablkcipher
#define crt_blkcipher	crt_u.blkcipher
#define crt_cipher	crt_u.cipher
#define crt_compress	crt_u.compress

struct crypto_tfm {

	u32 crt_flags;

	union {
		struct ablkcipher_tfm ablkcipher;
		struct blkcipher_tfm blkcipher;
		struct cipher_tfm cipher;
		struct compress_tfm compress;
	} crt_u;

	void (*exit)(struct crypto_tfm *tfm);

	struct crypto_alg *__crt_alg;

	void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
};

struct crypto_ablkcipher {
	struct crypto_tfm base;
};

struct crypto_blkcipher {
	struct crypto_tfm base;
};

struct crypto_cipher {
	struct crypto_tfm base;
};

struct crypto_comp {
	struct crypto_tfm base;
};

enum {
	CRYPTOA_UNSPEC,
	CRYPTOA_ALG,
	CRYPTOA_TYPE,
	CRYPTOA_U32,
	__CRYPTOA_MAX,
};

#define CRYPTOA_MAX (__CRYPTOA_MAX - 1)

/* Maximum number of (rtattr) parameters for each template. */
#define CRYPTO_MAX_ATTRS 32

struct crypto_attr_alg {
	char name[CRYPTO_MAX_ALG_NAME];
};

struct crypto_attr_type {
	u32 type;
	u32 mask;
};

struct crypto_attr_u32 {
	u32 num;
};

/*
 * Transform user interface.
 */

struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);

static inline void crypto_free_tfm(struct crypto_tfm *tfm)
{
	return crypto_destroy_tfm(tfm, tfm);
}

int alg_test(const char *driver, const char *alg, u32 type, u32 mask);
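/*
 * A sketch of allocating and releasing a bare transform, following the
 * IS_ERR()/PTR_ERR() convention used throughout this API (hypothetical
 * caller context):
 *
 *	struct crypto_tfm *tfm;
 *
 *	tfm = crypto_alloc_base("aes", CRYPTO_ALG_TYPE_CIPHER,
 *				CRYPTO_ALG_TYPE_MASK);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_free_tfm(tfm);
 */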
/*
 * Transform helpers which query the underlying algorithm.
 */
static inline const char *crypto_tfm_alg_name(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_name;
}

static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_driver_name;
}

static inline int crypto_tfm_alg_priority(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_priority;
}

static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
}

static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_blocksize;
}

static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_alignmask;
}

static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm)
{
	return tfm->crt_flags;
}

static inline void crypto_tfm_set_flags(struct crypto_tfm *tfm, u32 flags)
{
	tfm->crt_flags |= flags;
}

static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags)
{
	tfm->crt_flags &= ~flags;
}

static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
{
	return tfm->__crt_ctx;
}

static inline unsigned int crypto_tfm_ctx_alignment(void)
{
	struct crypto_tfm *tfm;
	return __alignof__(tfm->__crt_ctx);
}

/*
 * API wrappers.
 */
static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast(
	struct crypto_tfm *tfm)
{
	return (struct crypto_ablkcipher *)tfm;
}

static inline u32 crypto_skcipher_type(u32 type)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
	return type;
}

static inline u32 crypto_skcipher_mask(u32 mask)
{
	mask &= ~CRYPTO_ALG_TYPE_MASK;
	mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK;
	return mask;
}

/**
 * DOC: Asynchronous Block Cipher API
 *
 * Asynchronous block cipher API is used with the ciphers of type
 * CRYPTO_ALG_TYPE_ABLKCIPHER (listed as type "ablkcipher" in /proc/crypto).
 *
 * Asynchronous cipher operations imply that the function invocation for a
 * cipher request returns immediately before the completion of the operation.
 * The cipher request is scheduled as a separate kernel thread and therefore
 * load-balanced on the different CPUs via the process scheduler. To allow
 * the kernel crypto API to inform the caller about the completion of a cipher
 * request, the caller must provide a callback function. That function is
 * invoked with the cipher handle when the request completes.
 *
 * To support the asynchronous operation, more information than just the
 * cipher handle must be supplied to the kernel crypto API. That additional
 * information is given by filling in the ablkcipher_request data structure.
 *
 * For the asynchronous block cipher API, the state is maintained with the tfm
 * cipher handle. A single tfm can be used across multiple calls and in
 * parallel. For asynchronous block cipher calls, context data supplied and
 * only used by the caller can be referenced in the request data structure in
 * addition to the IV used for the cipher request. The maintenance of such
 * state information would be important for a crypto driver implementer to
 * have, because when calling the callback function upon completion of the
 * cipher operation, that callback function may need some information about
 * which operation just finished if it invoked multiple operations in
 * parallel. This state information is unused by the kernel crypto API.
 */
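/*
 * A sketch of one asynchronous cipher operation, given an ablkcipher handle
 * @tfm already obtained from the crypto API (hypothetical buffers; error
 * handling abbreviated):
 *
 *	struct ablkcipher_request *req;
 *	struct scatterlist sg;
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	crypto_ablkcipher_setkey(tfm, key, keylen);
 *	sg_init_one(&sg, buf, buflen);
 *	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *					crypto_req_done, &wait);
 *	ablkcipher_request_set_crypt(req, &sg, &sg, buflen, iv);
 *	err = crypto_wait_req(crypto_ablkcipher_encrypt(req), &wait);
 *	ablkcipher_request_free(req);
 */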
static inline struct crypto_tfm *crypto_ablkcipher_tfm(
	struct crypto_ablkcipher *tfm)
{
	return &tfm->base;
}

/**
 * crypto_free_ablkcipher() - zeroize and free cipher handle
 * @tfm: cipher handle to be freed
 */
static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm)
{
	crypto_free_tfm(crypto_ablkcipher_tfm(tfm));
}

/**
 * crypto_has_ablkcipher() - Search for the availability of an ablkcipher.
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      ablkcipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Return: true when the ablkcipher is known to the kernel crypto API; false
 *	   otherwise
 */
static inline int crypto_has_ablkcipher(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_has_alg(alg_name, crypto_skcipher_type(type),
			      crypto_skcipher_mask(mask));
}

static inline struct ablkcipher_tfm *crypto_ablkcipher_crt(
	struct crypto_ablkcipher *tfm)
{
	return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher;
}

/**
 * crypto_ablkcipher_ivsize() - obtain IV size
 * @tfm: cipher handle
 *
 * The size of the IV for the ablkcipher referenced by the cipher handle is
 * returned. This IV size may be zero if the cipher does not need an IV.
 *
 * Return: IV size in bytes
 */
static inline unsigned int crypto_ablkcipher_ivsize(
	struct crypto_ablkcipher *tfm)
{
	return crypto_ablkcipher_crt(tfm)->ivsize;
}
/**
 * crypto_ablkcipher_blocksize() - obtain block size of cipher
 * @tfm: cipher handle
 *
 * The block size for the ablkcipher referenced with the cipher handle is
 * returned. The caller may use that information to allocate appropriate
 * memory for the data returned by the encryption or decryption operation.
 *
 * Return: block size of cipher
 */
static inline unsigned int crypto_ablkcipher_blocksize(
	struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(tfm));
}

static inline unsigned int crypto_ablkcipher_alignmask(
	struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_alg_alignmask(crypto_ablkcipher_tfm(tfm));
}

static inline u32 crypto_ablkcipher_get_flags(struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_get_flags(crypto_ablkcipher_tfm(tfm));
}

static inline void crypto_ablkcipher_set_flags(struct crypto_ablkcipher *tfm,
					       u32 flags)
{
	crypto_tfm_set_flags(crypto_ablkcipher_tfm(tfm), flags);
}

static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm,
						 u32 flags)
{
	crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags);
}

/**
 * crypto_ablkcipher_setkey() - set key for cipher
 * @tfm: cipher handle
 * @key: buffer holding the key
 * @keylen: length of the key in bytes
 *
 * The caller provided key is set for the ablkcipher referenced by the cipher
 * handle.
 *
 * Note, the key length determines the cipher type. Many block ciphers support
 * several key sizes, such as AES-128 vs AES-192 vs. AES-256. When providing a
 * 16 byte key for an AES cipher handle, AES-128 is performed.
 *
 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
 */
static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(tfm);

	return crt->setkey(crt->base, key, keylen);
}

/**
 * crypto_ablkcipher_reqtfm() - obtain cipher handle from request
 * @req: ablkcipher_request out of which the cipher handle is to be obtained
 *
 * Return the crypto_ablkcipher handle when furnishing an ablkcipher_request
 * data structure.
 *
 * Return: crypto_ablkcipher handle
 */
static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm(
	struct ablkcipher_request *req)
{
	return __crypto_ablkcipher_cast(req->base.tfm);
}

/**
 * crypto_ablkcipher_encrypt() - encrypt plaintext
 * @req: reference to the ablkcipher_request handle that holds all information
 *	 needed to perform the cipher operation
 *
 * Encrypt plaintext data using the ablkcipher_request handle. That data
 * structure and how it is filled with data is discussed with the
 * ablkcipher_request_* functions.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_tfm *crt =
		crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
	struct crypto_alg *alg = crt->base->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	ret = crt->encrypt(req);
	crypto_stats_ablkcipher_encrypt(nbytes, ret, alg);
	return ret;
}

/**
 * crypto_ablkcipher_decrypt() - decrypt ciphertext
 * @req: reference to the ablkcipher_request handle that holds all information
 *	 needed to perform the cipher operation
 *
 * Decrypt ciphertext data using the ablkcipher_request handle. That data
 * structure and how it is filled with data is discussed with the
 * ablkcipher_request_* functions.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_tfm *crt =
		crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
	struct crypto_alg *alg = crt->base->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	ret = crt->decrypt(req);
	crypto_stats_ablkcipher_decrypt(nbytes, ret, alg);
	return ret;
}

/**
 * DOC: Asynchronous Cipher Request Handle
 *
 * The ablkcipher_request data structure contains all pointers to data
 * required for the asynchronous cipher operation. This includes the cipher
 * handle (which can be used by multiple ablkcipher_request instances), pointer
 * to plaintext and ciphertext, asynchronous callback function, etc. It acts
 * as a handle to the ablkcipher_request_* API calls in a similar way as
 * ablkcipher handle to the crypto_ablkcipher_* API calls.
 */

/**
 * crypto_ablkcipher_reqsize() - obtain size of the request data structure
 * @tfm: cipher handle
 *
 * Return: number of bytes
 */
static inline unsigned int crypto_ablkcipher_reqsize(
	struct crypto_ablkcipher *tfm)
{
	return crypto_ablkcipher_crt(tfm)->reqsize;
}

/**
 * ablkcipher_request_set_tfm() - update cipher handle reference in request
 * @req: request handle to be modified
 * @tfm: cipher handle that shall be added to the request handle
 *
 * Allow the caller to replace the existing ablkcipher handle in the request
 * data structure with a different one.
 */
static inline void ablkcipher_request_set_tfm(
	struct ablkcipher_request *req, struct crypto_ablkcipher *tfm)
{
	req->base.tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_crt(tfm)->base);
}

static inline struct ablkcipher_request *ablkcipher_request_cast(
	struct crypto_async_request *req)
{
	return container_of(req, struct ablkcipher_request, base);
}

/**
 * ablkcipher_request_alloc() - allocate request data structure
 * @tfm: cipher handle to be registered with the request
 * @gfp: memory allocation flag that is handed to kmalloc by the API call.
 *
 * Allocate the request data structure that must be used with the ablkcipher
 * encrypt and decrypt API calls. During the allocation, the provided ablkcipher
 * handle is registered in the request data structure.
 *
 * Return: allocated request handle in case of success, or NULL if out of memory
 */
static inline struct ablkcipher_request *ablkcipher_request_alloc(
	struct crypto_ablkcipher *tfm, gfp_t gfp)
{
	struct ablkcipher_request *req;

	req = kmalloc(sizeof(struct ablkcipher_request) +
		      crypto_ablkcipher_reqsize(tfm), gfp);

	if (likely(req))
		ablkcipher_request_set_tfm(req, tfm);

	return req;
}

/**
 * ablkcipher_request_free() - zeroize and free request data structure
 * @req: request data structure cipher handle to be freed
 */
static inline void ablkcipher_request_free(struct ablkcipher_request *req)
{
	kzfree(req);
}

/**
 * ablkcipher_request_set_callback() - set asynchronous callback function
 * @req: request handle
 * @flags: specify zero or an ORing of the flags
 *	   CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
 *	   increase the wait queue beyond the initial maximum size;
 *	   CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
 * @compl: callback function pointer to be registered with the request handle
 * @data: The data pointer refers to memory that is not used by the kernel
 *	  crypto API, but provided to the callback function for it to use. Here,
 *	  the caller can provide a reference to memory the callback function can
 *	  operate on. As the callback function is invoked asynchronously to the
 *	  related functionality, it may need to access data structures of the
 *	  related functionality which can be referenced using this pointer. The
 *	  callback function can access the memory via the "data" field in the
 *	  crypto_async_request data structure provided to the callback function.
 *
 * This function allows setting the callback function that is triggered once
 * the cipher operation completes.
 *
 * The callback function is registered with the ablkcipher_request handle and
 * must comply with the following template::
 *
 *	void callback_function(struct crypto_async_request *req, int error)
 */
static inline void ablkcipher_request_set_callback(
	struct ablkcipher_request *req,
	u32 flags, crypto_completion_t compl, void *data)
{
	req->base.complete = compl;
	req->base.data = data;
	req->base.flags = flags;
}
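/*
 * A sketch of a callback matching the required template (hypothetical
 * mydrv_* names; @data below is whatever was passed as the last argument
 * to ablkcipher_request_set_callback()):
 *
 *	static void mydrv_op_done(struct crypto_async_request *req, int error)
 *	{
 *		struct mydrv_op *op = req->data;
 *
 *		if (error == -EINPROGRESS)
 *			return;		(backlogged request now in progress)
 *		op->err = error;
 *		complete(&op->done);
 *	}
 */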
/**
 * ablkcipher_request_set_crypt() - set data buffers
 * @req: request handle
 * @src: source scatter / gather list
 * @dst: destination scatter / gather list
 * @nbytes: number of bytes to process from @src
 * @iv: IV for the cipher operation which must comply with the IV size defined
 *	by crypto_ablkcipher_ivsize
 *
 * This function allows setting of the source data and destination data
 * scatter / gather lists.
 *
 * For encryption, the source is treated as the plaintext and the
 * destination is the ciphertext. For a decryption operation, the use is
 * reversed - the source is the ciphertext and the destination is the plaintext.
 */
static inline void ablkcipher_request_set_crypt(
	struct ablkcipher_request *req,
	struct scatterlist *src, struct scatterlist *dst,
	unsigned int nbytes, void *iv)
{
	req->src = src;
	req->dst = dst;
	req->nbytes = nbytes;
	req->info = iv;
}
/**
 * DOC: Synchronous Block Cipher API
 *
 * The synchronous block cipher API is used with the ciphers of type
 * CRYPTO_ALG_TYPE_BLKCIPHER (listed as type "blkcipher" in /proc/crypto)
 *
 * Synchronous calls have a context in the tfm. But since a single tfm can be
 * used in multiple calls and in parallel, this info should not be changeable
 * (unless a lock is used). This applies, for example, to the symmetric key.
 * However, the IV is changeable, so there is an iv field in the blkcipher_tfm
 * structure for the synchronous blkcipher API. So, it is the only state info
 * that can be kept for synchronous calls without using a big lock across a
 * tfm.
 *
 * The block cipher API allows the use of a complete cipher, i.e. a cipher
 * consisting of a template (a block chaining mode) and a single block cipher
 * primitive (e.g. AES).
 *
 * The plaintext data buffer and the ciphertext data buffer are pointed to
 * by using scatter/gather lists. The cipher operation is performed
 * on all segments of the provided scatter/gather lists.
 *
 * The kernel crypto API supports a cipher operation "in-place" which means
 * that the caller may provide the same scatter/gather list for the plaintext
 * and cipher text. After the completion of the cipher operation, the
 * plaintext data is replaced with the ciphertext data in case of an
 * encryption and vice versa for a decryption. The caller must ensure that the
 * scatter/gather lists for the output data point to sufficiently large
 * buffers, i.e. multiples of the block size of the cipher.
 */

static inline struct crypto_blkcipher *__crypto_blkcipher_cast(
	struct crypto_tfm *tfm)
{
	return (struct crypto_blkcipher *)tfm;
}

static inline struct crypto_blkcipher *crypto_blkcipher_cast(
	struct crypto_tfm *tfm)
{
	BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_BLKCIPHER);
	return __crypto_blkcipher_cast(tfm);
}

/**
 * crypto_alloc_blkcipher() - allocate synchronous block cipher handle
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      blkcipher cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Allocate a cipher handle for a block cipher. The returned struct
 * crypto_blkcipher is the cipher handle that is required for any subsequent
 * API invocation for that block cipher.
 *
 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
 *	   of an error, PTR_ERR() returns the error code.
 */
static inline struct crypto_blkcipher *crypto_alloc_blkcipher(
	const char *alg_name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask));
}
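/*
 * A sketch of a complete synchronous operation (hypothetical buffers; error
 * handling abbreviated):
 *
 *	struct crypto_blkcipher *tfm;
 *	struct blkcipher_desc desc;
 *	struct scatterlist sg;
 *
 *	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
 *	crypto_blkcipher_setkey(tfm, key, keylen);
 *	crypto_blkcipher_set_iv(tfm, iv, crypto_blkcipher_ivsize(tfm));
 *	sg_init_one(&sg, buf, buflen);
 *	desc.tfm = tfm;
 *	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 *	err = crypto_blkcipher_encrypt(&desc, &sg, &sg, buflen);
 *	crypto_free_blkcipher(tfm);
 */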
static inline struct crypto_tfm *crypto_blkcipher_tfm(
	struct crypto_blkcipher *tfm)
{
	return &tfm->base;
}

/**
 * crypto_free_blkcipher() - zeroize and free the block cipher handle
 * @tfm: cipher handle to be freed
 */
static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm)
{
	crypto_free_tfm(crypto_blkcipher_tfm(tfm));
}

/**
 * crypto_has_blkcipher() - Search for the availability of a block cipher
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      block cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Return: true when the block cipher is known to the kernel crypto API; false
 *	   otherwise
 */
static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return crypto_has_alg(alg_name, type, mask);
}

/**
 * crypto_blkcipher_name() - return the name / cra_name from the cipher handle
 * @tfm: cipher handle
 *
 * Return: The character string holding the name of the cipher
 */
static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm)
{
	return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm));
}

static inline struct blkcipher_tfm *crypto_blkcipher_crt(
	struct crypto_blkcipher *tfm)
{
	return &crypto_blkcipher_tfm(tfm)->crt_blkcipher;
}

static inline struct blkcipher_alg *crypto_blkcipher_alg(
	struct crypto_blkcipher *tfm)
{
	return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher;
}

/**
 * crypto_blkcipher_ivsize() - obtain IV size
 * @tfm: cipher handle
 *
 * The size of the IV for the block cipher referenced by the cipher handle is
 * returned. This IV size may be zero if the cipher does not need an IV.
 *
 * Return: IV size in bytes
 */
static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm)
{
	return crypto_blkcipher_alg(tfm)->ivsize;
}

/**
 * crypto_blkcipher_blocksize() - obtain block size of cipher
 * @tfm: cipher handle
 *
 * The block size for the block cipher referenced with the cipher handle is
 * returned. The caller may use that information to allocate appropriate
 * memory for the data returned by the encryption or decryption operation.
 *
 * Return: block size of cipher
 */
static inline unsigned int crypto_blkcipher_blocksize(
	struct crypto_blkcipher *tfm)
{
	return crypto_tfm_alg_blocksize(crypto_blkcipher_tfm(tfm));
}

static inline unsigned int crypto_blkcipher_alignmask(
	struct crypto_blkcipher *tfm)
{
	return crypto_tfm_alg_alignmask(crypto_blkcipher_tfm(tfm));
}

static inline u32 crypto_blkcipher_get_flags(struct crypto_blkcipher *tfm)
{
	return crypto_tfm_get_flags(crypto_blkcipher_tfm(tfm));
}

static inline void crypto_blkcipher_set_flags(struct crypto_blkcipher *tfm,
					      u32 flags)
{
	crypto_tfm_set_flags(crypto_blkcipher_tfm(tfm), flags);
}

static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm,
						u32 flags)
{
	crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags);
}
/**
 * crypto_blkcipher_setkey() - set key for cipher
 * @tfm: cipher handle
 * @key: buffer holding the key
 * @keylen: length of the key in bytes
 *
 * The caller provided key is set for the block cipher referenced by the cipher
 * handle.
 *
 * Note, the key length determines the cipher type. Many block ciphers support
 * several key sizes, such as AES-128 vs AES-192 vs. AES-256. When providing a
 * 16 byte key for an AES cipher handle, AES-128 is performed.
 *
 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
					  const u8 *key, unsigned int keylen)
{
	return crypto_blkcipher_crt(tfm)->setkey(crypto_blkcipher_tfm(tfm),
						 key, keylen);
}

static inline int crypto_blkcipher_setkeytype(struct crypto_blkcipher *tfm,
					      const u8 *key,
					      unsigned int keylen)
{
	return crypto_blkcipher_crt(tfm)->setkeytype(crypto_blkcipher_tfm(tfm),
						     key, keylen);
}

/**
 * crypto_blkcipher_encrypt() - encrypt plaintext
 * @desc: reference to the block cipher handle with meta data
 * @dst: scatter/gather list that is filled by the cipher operation with the
 *	 ciphertext
 * @src: scatter/gather list that holds the plaintext
 * @nbytes: number of bytes of the plaintext to encrypt.
 *
 * Encrypt plaintext data using the IV set by the caller with a preceding
 * call of crypto_blkcipher_set_iv.
 *
 * The blkcipher_desc data structure must be filled by the caller and can
 * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
 * with the block cipher handle; desc.flags is filled with either
 * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc,
					   struct scatterlist *dst,
					   struct scatterlist *src,
					   unsigned int nbytes)
{
	desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
	return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
}
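/*
 * For the _iv variants the caller supplies the IV per operation instead of
 * using the IV stored in the tfm (a sketch, continuing the synchronous
 * example above):
 *
 *	desc.info = iv;
 *	err = crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, buflen);
 */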
/**
 * crypto_blkcipher_encrypt_iv() - encrypt plaintext with dedicated IV
 * @desc: reference to the block cipher handle with meta data
 * @dst: scatter/gather list that is filled by the cipher operation with the
 *	ciphertext
 * @src: scatter/gather list that holds the plaintext
 * @nbytes: number of bytes of the plaintext to encrypt
 *
 * Encrypt plaintext data with the use of an IV that is solely used for this
 * cipher operation. Any previously set IV is not used.
 *
 * The blkcipher_desc data structure must be filled by the caller and can
 * reside on the stack. The caller must fill desc as follows: desc.tfm is
 * filled with the block cipher handle; desc.info is filled with the IV to be
 * used for the current operation; desc.flags is filled with either
 * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
					      struct scatterlist *dst,
					      struct scatterlist *src,
					      unsigned int nbytes)
{
	return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
}

/**
 * crypto_blkcipher_decrypt() - decrypt ciphertext
 * @desc: reference to the block cipher handle with meta data
 * @dst: scatter/gather list that is filled by the cipher operation with the
 *	plaintext
 * @src: scatter/gather list that holds the ciphertext
 * @nbytes: number of bytes of the ciphertext to decrypt
 *
 * Decrypt ciphertext data using the IV set by the caller with a preceding
 * call of crypto_blkcipher_set_iv().
 *
 * The blkcipher_desc data structure must be filled by the caller as documented
 * for the crypto_blkcipher_encrypt() call above.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
					   struct scatterlist *dst,
					   struct scatterlist *src,
					   unsigned int nbytes)
{
	desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
	return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
}

/**
 * crypto_blkcipher_decrypt_iv() - decrypt ciphertext with dedicated IV
 * @desc: reference to the block cipher handle with meta data
 * @dst: scatter/gather list that is filled by the cipher operation with the
 *	plaintext
 * @src: scatter/gather list that holds the ciphertext
 * @nbytes: number of bytes of the ciphertext to decrypt
 *
 * Decrypt ciphertext data with the use of an IV that is solely used for this
 * cipher operation. Any previously set IV is not used.
 *
 * The blkcipher_desc data structure must be filled by the caller as documented
 * for the crypto_blkcipher_encrypt_iv() call above.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
					      struct scatterlist *dst,
					      struct scatterlist *src,
					      unsigned int nbytes)
{
	return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
}
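
/*
 * Example: decryption with a per-call IV via crypto_blkcipher_decrypt_iv().
 * Illustrative sketch only, compiled out; it assumes the key has already
 * been set on the transform and that <linux/scatterlist.h> is available
 * for sg_init_one().
 */
#if 0
static int example_blkcipher_decrypt_iv(struct crypto_blkcipher *tfm,
					u8 *buf, unsigned int len, u8 *iv)
{
	struct blkcipher_desc desc = {
		.tfm	= tfm,
		.info	= iv,	/* IV used for this call only */
		.flags	= CRYPTO_TFM_REQ_MAY_SLEEP,
	};
	struct scatterlist sg;

	sg_init_one(&sg, buf, len);	/* decrypt in place */
	return crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, len);
}
#endif
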
/**
 * crypto_blkcipher_set_iv() - set IV for cipher
 * @tfm: cipher handle
 * @src: buffer holding the IV
 * @len: length of the IV in bytes
 *
 * The caller-provided IV is set for the block cipher referenced by the
 * cipher handle.
 */
static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm,
					   const u8 *src, unsigned int len)
{
	memcpy(crypto_blkcipher_crt(tfm)->iv, src, len);
}

/**
 * crypto_blkcipher_get_iv() - obtain IV from cipher
 * @tfm: cipher handle
 * @dst: buffer filled with the IV
 * @len: length of the buffer dst
 *
 * The caller can obtain the IV set for the block cipher referenced by the
 * cipher handle and store it into the user-provided buffer. If the buffer
 * is too small to hold the full IV, the IV is truncated to fit the buffer.
 */
static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm,
					   u8 *dst, unsigned int len)
{
	memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len);
}
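
/*
 * Example: saving and restoring the transform-internal IV around operations
 * that clobber it. Illustrative sketch only, compiled out; it assumes a
 * 16-byte IV (e.g. AES-CBC). Real code should size the buffer using
 * crypto_blkcipher_ivsize(), declared earlier in this header.
 */
#if 0
static void example_blkcipher_iv_save_restore(struct crypto_blkcipher *tfm)
{
	u8 saved_iv[16];	/* assumed IV size for this sketch */

	crypto_blkcipher_get_iv(tfm, saved_iv, sizeof(saved_iv));
	/* ... cipher operations that advance the internal IV ... */
	crypto_blkcipher_set_iv(tfm, saved_iv, sizeof(saved_iv));
}
#endif
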
/**
 * DOC: Single Block Cipher API
 *
 * The single block cipher API is used with the ciphers of type
 * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto).
 *
 * Using the single block cipher API calls, operations with the basic cipher
 * primitive can be implemented. These cipher primitives exclude any block
 * chaining operations including IV handling.
 *
 * The purpose of this single block cipher API is to support the
 * implementation of templates or other concepts that only need to perform
 * the cipher operation on one block at a time. Templates invoke the
 * underlying cipher primitive block-wise and process either the input or
 * the output data of these cipher operations.
 */

static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
{
	return (struct crypto_cipher *)tfm;
}

static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm)
{
	BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
	return __crypto_cipher_cast(tfm);
}

/**
 * crypto_alloc_cipher() - allocate single block cipher handle
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      single block cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Allocate a cipher handle for a single block cipher. The returned struct
 * crypto_cipher is the cipher handle that is required for any subsequent API
 * invocation for that single block cipher.
 *
 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
 *	   of an error, PTR_ERR() returns the error code.
 */
static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
							u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_CIPHER;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask));
}

static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
{
	return &tfm->base;
}

/**
 * crypto_free_cipher() - zeroize and free the single block cipher handle
 * @tfm: cipher handle to be freed
 */
static inline void crypto_free_cipher(struct crypto_cipher *tfm)
{
	crypto_free_tfm(crypto_cipher_tfm(tfm));
}

/**
 * crypto_has_cipher() - Search for the availability of a single block cipher
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      single block cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Return: true when the single block cipher is known to the kernel crypto API;
 *	   false otherwise
 */
static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_CIPHER;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return crypto_has_alg(alg_name, type, mask);
}

static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm)
{
	return &crypto_cipher_tfm(tfm)->crt_cipher;
}

/**
 * crypto_cipher_blocksize() - obtain block size for cipher
 * @tfm: cipher handle
 *
 * The block size for the single block cipher referenced with the cipher
 * handle tfm is returned. The caller may use that information to allocate
 * appropriate memory for the data returned by the encryption or decryption
 * operation.
 *
 * Return: block size of cipher
 */
static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
{
	return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
}

static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm)
{
	return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm));
}

static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm)
{
	return crypto_tfm_get_flags(crypto_cipher_tfm(tfm));
}

static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm,
					   u32 flags)
{
	crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags);
}

static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
					     u32 flags)
{
	crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
}
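
/*
 * Example: encrypting a single 16-byte block with the single block cipher
 * API. Illustrative sketch only, compiled out; it assumes the "aes" cipher
 * is available and uses crypto_cipher_setkey() and
 * crypto_cipher_encrypt_one(), which are defined immediately below.
 */
#if 0
static int example_cipher_one_block(u8 *dst, const u8 *src,
				    const u8 *key, unsigned int keylen)
{
	struct crypto_cipher *tfm;
	int err;

	if (!crypto_has_cipher("aes", 0, 0))
		return -ENOENT;

	tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_cipher_setkey(tfm, key, keylen);
	if (!err)
		/* src and dst must each be at least one block in size */
		crypto_cipher_encrypt_one(tfm, dst, src);

	crypto_free_cipher(tfm);
	return err;
}
#endif
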
/**
 * crypto_cipher_setkey() - set key for cipher
 * @tfm: cipher handle
 * @key: buffer holding the key
 * @keylen: length of the key in bytes
 *
 * The caller-provided key is set for the single block cipher referenced by
 * the cipher handle.
 *
 * Note that the key length determines the cipher variant. Many block ciphers
 * implement different variants depending on the key size, such as AES-128 vs.
 * AES-192 vs. AES-256. Providing a 16-byte key to an AES cipher handle
 * therefore selects AES-128.
 *
 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
 */
static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
				       const u8 *key, unsigned int keylen)
{
	return crypto_cipher_crt(tfm)->cit_setkey(crypto_cipher_tfm(tfm),
						  key, keylen);
}

/**
 * crypto_cipher_encrypt_one() - encrypt one block of plaintext
 * @tfm: cipher handle
 * @dst: points to the buffer that will be filled with the ciphertext
 * @src: buffer holding the plaintext to be encrypted
 *
 * Invoke the encryption operation on one block. The caller must ensure that
 * the plaintext and ciphertext buffers are each at least one block in size.
 */
static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
					     u8 *dst, const u8 *src)
{
	crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm),
						dst, src);
}

/**
 * crypto_cipher_decrypt_one() - decrypt one block of ciphertext
 * @tfm: cipher handle
 * @dst: points to the buffer that will be filled with the plaintext
 * @src: buffer holding the ciphertext to be decrypted
 *
 * Invoke the decryption operation on one block. The caller must ensure that
 * the plaintext and ciphertext buffers are each at least one block in size.
 */
static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
					     u8 *dst, const u8 *src)
{
	crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm),
						dst, src);
}

static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
{
	return (struct crypto_comp *)tfm;
}

static inline struct crypto_comp *crypto_comp_cast(struct crypto_tfm *tfm)
{
	BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_COMPRESS) &
	       CRYPTO_ALG_TYPE_MASK);
	return __crypto_comp_cast(tfm);
}

static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name,
						    u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_COMPRESS;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return __crypto_comp_cast(crypto_alloc_base(alg_name, type, mask));
}

static inline struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm)
{
	return &tfm->base;
}

static inline void crypto_free_comp(struct crypto_comp *tfm)
{
	crypto_free_tfm(crypto_comp_tfm(tfm));
}

static inline int crypto_has_comp(const char *alg_name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_COMPRESS;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return crypto_has_alg(alg_name, type, mask);
}

static inline const char *crypto_comp_name(struct crypto_comp *tfm)
{
	return crypto_tfm_alg_name(crypto_comp_tfm(tfm));
}

static inline struct compress_tfm *crypto_comp_crt(struct crypto_comp *tfm)
{
	return &crypto_comp_tfm(tfm)->crt_compress;
}

static inline int crypto_comp_compress(struct crypto_comp *tfm,
				       const u8 *src, unsigned int slen,
				       u8 *dst, unsigned int *dlen)
{
	return crypto_comp_crt(tfm)->cot_compress(crypto_comp_tfm(tfm),
						  src, slen, dst, dlen);
}

static inline int crypto_comp_decompress(struct crypto_comp *tfm,
					 const u8 *src, unsigned int slen,
					 u8 *dst, unsigned int *dlen)
{
	return crypto_comp_crt(tfm)->cot_decompress(crypto_comp_tfm(tfm),
						    src, slen, dst, dlen);
}
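
/*
 * Example: compressing a buffer with the (de)compression API above.
 * Illustrative sketch only, compiled out; it assumes the "deflate"
 * algorithm is enabled in the kernel. On input, *dlen holds the size of
 * the destination buffer; on success it is updated to the number of bytes
 * actually written.
 */
#if 0
static int example_comp_compress(const u8 *src, unsigned int slen,
				 u8 *dst, unsigned int *dlen)
{
	struct crypto_comp *tfm;
	int err;

	tfm = crypto_alloc_comp("deflate", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_comp_compress(tfm, src, slen, dst, dlen);

	crypto_free_comp(tfm);
	return err;
}
#endif
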
#endif	/* _LINUX_CRYPTO_H */