linux/include/crypto/algapi.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Cryptographic API for algorithms (i.e., low-level API).
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */
#ifndef _CRYPTO_ALGAPI_H
#define _CRYPTO_ALGAPI_H

#include <linux/align.h>
#include <linux/crypto.h>
#include <linux/kconfig.h>
#include <linux/list.h>
#include <linux/types.h>

#include <asm/unaligned.h>

/*
 * Maximum values for blocksize and alignmask, used to allocate
 * static buffers that are big enough for any combination of
 * algs and architectures. Ciphers have a lower maximum size.
 */
#define MAX_ALGAPI_BLOCKSIZE            160
#define MAX_ALGAPI_ALIGNMASK            63
#define MAX_CIPHER_BLOCKSIZE            16
#define MAX_CIPHER_ALIGNMASK            15
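/*
 * Illustrative sketch (not part of this header): the constants above let a
 * caller reserve one static or stack buffer that can hold an aligned block
 * for any algorithm. The helper name is hypothetical; it assumes only
 * PTR_ALIGN() from <linux/align.h>.
 */
#if 0
static void example_aligned_buf(unsigned int alignmask)
{
        u8 buf[MAX_ALGAPI_BLOCKSIZE + MAX_ALGAPI_ALIGNMASK];
        /* Align the start of the buffer to (alignmask + 1) bytes. */
        u8 *aligned = PTR_ALIGN(buf, alignmask + 1);

        /* aligned now points at MAX_ALGAPI_BLOCKSIZE usable bytes. */
}
#endif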
struct crypto_aead;
struct crypto_instance;
struct module;
struct notifier_block;
struct rtattr;
struct seq_file;
struct sk_buff;

struct crypto_type {
        unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
        unsigned int (*extsize)(struct crypto_alg *alg);
        int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
        int (*init_tfm)(struct crypto_tfm *tfm);
        void (*show)(struct seq_file *m, struct crypto_alg *alg);
        int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
        void (*free)(struct crypto_instance *inst);

        unsigned int type;
        unsigned int maskclear;
        unsigned int maskset;
        unsigned int tfmsize;
};

struct crypto_instance {
        struct crypto_alg alg;

        struct crypto_template *tmpl;

        union {
                /* Node in list of instances after registration. */
                struct hlist_node list;
                /* List of attached spawns before registration. */
                struct crypto_spawn *spawns;
        };

        void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

struct crypto_template {
        struct list_head list;
        struct hlist_head instances;
        struct module *module;

        int (*create)(struct crypto_template *tmpl, struct rtattr **tb);

        char name[CRYPTO_MAX_ALG_NAME];
};

struct crypto_spawn {
        struct list_head list;
        struct crypto_alg *alg;
        union {
                /* Back pointer to instance after registration. */
                struct crypto_instance *inst;
                /* Spawn list pointer prior to registration. */
                struct crypto_spawn *next;
        };
        const struct crypto_type *frontend;
        u32 mask;
        bool dead;
        bool registered;
};

struct crypto_queue {
        struct list_head list;
        struct list_head *backlog;

        unsigned int qlen;
        unsigned int max_qlen;
};

struct scatter_walk {
        struct scatterlist *sg;
        unsigned int offset;
};

struct crypto_attr_alg {
        char name[CRYPTO_MAX_ALG_NAME];
};

struct crypto_attr_type {
        u32 type;
        u32 mask;
};

void crypto_mod_put(struct crypto_alg *alg);

int crypto_register_template(struct crypto_template *tmpl);
int crypto_register_templates(struct crypto_template *tmpls, int count);
void crypto_unregister_template(struct crypto_template *tmpl);
void crypto_unregister_templates(struct crypto_template *tmpls, int count);
struct crypto_template *crypto_lookup_template(const char *name);

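/*
 * Illustrative sketch (hypothetical names, not from this header): a template
 * is typically defined statically and registered from module init.
 */
#if 0
static int mywrap_create(struct crypto_template *tmpl, struct rtattr **tb);

static struct crypto_template mywrap_tmpl = {
        .name = "mywrap",
        .create = mywrap_create,
        .module = THIS_MODULE,
};

static int __init mywrap_init(void)
{
        return crypto_register_template(&mywrap_tmpl);
}

static void __exit mywrap_exit(void)
{
        crypto_unregister_template(&mywrap_tmpl);
}
#endif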
int crypto_register_instance(struct crypto_template *tmpl,
                             struct crypto_instance *inst);
void crypto_unregister_instance(struct crypto_instance *inst);

int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst,
                      const char *name, u32 type, u32 mask);
void crypto_drop_spawn(struct crypto_spawn *spawn);
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
                                    u32 mask);
void *crypto_spawn_tfm2(struct crypto_spawn *spawn);

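/*
 * Illustrative sketch: inside a template's ->create() callback, the
 * algorithm named by the user is looked up and pinned with
 * crypto_grab_spawn(); the spawn lives in the instance context. Real
 * templates normally use a type-specific instance wrapper and must fill in
 * inst->alg before registering; that is elided here. Hypothetical names;
 * needs <linux/err.h> and <linux/slab.h>.
 */
#if 0
static int mywrap_create(struct crypto_template *tmpl, struct rtattr **tb)
{
        struct crypto_instance *inst;
        struct crypto_spawn *spawn;
        const char *cipher_name;
        int err;

        cipher_name = crypto_attr_alg_name(tb[1]);
        if (IS_ERR(cipher_name))
                return PTR_ERR(cipher_name);

        inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;
        spawn = crypto_instance_ctx(inst);

        err = crypto_grab_spawn(spawn, inst, cipher_name, 0, 0);
        if (err)
                goto err_free;

        /* ... fill in inst->alg, then crypto_register_instance() ... */
        return 0;

err_free:
        crypto_drop_spawn(spawn);
        kfree(inst);
        return err;
}
#endif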
struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret);
const char *crypto_attr_alg_name(struct rtattr *rta);
int crypto_inst_setname(struct crypto_instance *inst, const char *name,
                        struct crypto_alg *alg);

void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
                           struct crypto_async_request *request);
void crypto_enqueue_request_head(struct crypto_queue *queue,
                                 struct crypto_async_request *request);
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
{
        return queue->qlen;
}

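/*
 * Illustrative sketch of driver-side queue usage (hypothetical names).
 * crypto_enqueue_request() conventionally returns -EINPROGRESS when the
 * request is queued, or -EBUSY when it had to go on the backlog; see the
 * implementation for the exact contract.
 */
#if 0
static struct crypto_queue example_queue;

static void example_setup(void)
{
        crypto_init_queue(&example_queue, 32);  /* max_qlen = 32 */
}

static void example_pump(void)
{
        struct crypto_async_request *backlog, *req;

        backlog = crypto_get_backlog(&example_queue);
        req = crypto_dequeue_request(&example_queue);
        if (!req)
                return;

        /* Tell a backlogged submitter its request is now in flight. */
        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        /* ... process req, then call req->complete(req, err) ... */
}
#endif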
 149void crypto_inc(u8 *a, unsigned int size);
 150void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size);
 151
 152static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
 153{
 154        if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
 155            __builtin_constant_p(size) &&
 156            (size % sizeof(unsigned long)) == 0) {
 157                unsigned long *d = (unsigned long *)dst;
 158                unsigned long *s = (unsigned long *)src;
 159                unsigned long l;
 160
 161                while (size > 0) {
 162                        l = get_unaligned(d) ^ get_unaligned(s++);
 163                        put_unaligned(l, d++);
 164                        size -= sizeof(unsigned long);
 165                }
 166        } else {
 167                __crypto_xor(dst, dst, src, size);
 168        }
 169}
 170
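/*
 * Illustrative sketch: crypto_inc() advances a big-endian counter block and
 * crypto_xor() mixes keystream into data, the way a CTR-style mode would.
 * Hypothetical helper, not from this header.
 */
#if 0
static void example_ctr_step(u8 *data, const u8 *keystream, u8 *ctr)
{
        crypto_xor(data, keystream, 16);        /* data ^= keystream */
        crypto_inc(ctr, 16);                    /* big-endian ctr++ */
}
#endif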
static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
                                  unsigned int size)
{
        if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
            __builtin_constant_p(size) &&
            (size % sizeof(unsigned long)) == 0) {
                unsigned long *d = (unsigned long *)dst;
                unsigned long *s1 = (unsigned long *)src1;
                unsigned long *s2 = (unsigned long *)src2;
                unsigned long l;

                while (size > 0) {
                        l = get_unaligned(s1++) ^ get_unaligned(s2++);
                        put_unaligned(l, d++);
                        size -= sizeof(unsigned long);
                }
        } else {
                __crypto_xor(dst, src1, src2, size);
        }
}

static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
{
        return PTR_ALIGN(crypto_tfm_ctx(tfm),
                         crypto_tfm_alg_alignmask(tfm) + 1);
}

static inline struct crypto_instance *crypto_tfm_alg_instance(
        struct crypto_tfm *tfm)
{
        return container_of(tfm->__crt_alg, struct crypto_instance, alg);
}

static inline void *crypto_instance_ctx(struct crypto_instance *inst)
{
        return inst->__ctx;
}

static inline struct crypto_async_request *crypto_get_backlog(
        struct crypto_queue *queue)
{
        return queue->backlog == &queue->list ? NULL :
               container_of(queue->backlog, struct crypto_async_request, list);
}

static inline u32 crypto_requires_off(struct crypto_attr_type *algt, u32 off)
{
        return (algt->type ^ off) & algt->mask & off;
}

/*
 * When an algorithm uses another algorithm (e.g., if it's an instance of a
 * template), these are the flags that should always be set on the "outer"
 * algorithm if any "inner" algorithm has them set.
 */
#define CRYPTO_ALG_INHERITED_FLAGS      \
        (CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |  \
         CRYPTO_ALG_ALLOCATES_MEMORY)

/*
 * Given the type and mask that specify the flags restrictions on a template
 * instance being created, return the mask that should be passed to
 * crypto_grab_*() (along with type=0) to honor any request the user made to
 * have any of the CRYPTO_ALG_INHERITED_FLAGS clear.
 */
static inline u32 crypto_algt_inherited_mask(struct crypto_attr_type *algt)
{
        return crypto_requires_off(algt, CRYPTO_ALG_INHERITED_FLAGS);
}

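/*
 * Illustrative sketch: a template's ->create() typically derives the mask
 * once and passes it (with type 0) to crypto_grab_*(). Here it is obtained
 * via crypto_check_attr_type(), which applies crypto_algt_inherited_mask()
 * internally. Hypothetical names; error handling abbreviated.
 */
#if 0
static int example_create(struct crypto_template *tmpl, struct rtattr **tb)
{
        u32 mask;
        int err;

        err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
        if (err)
                return err;

        /* ... crypto_grab_spawn(spawn, inst, name, 0, mask); ... */
        return 0;
}
#endif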
noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);

/**
 * crypto_memneq - Compare two areas of memory without leaking
 *                 timing information.
 *
 * @a: One area of memory
 * @b: Another area of memory
 * @size: The size of the area.
 *
 * Returns 0 when data is equal, 1 otherwise.
 */
static inline int crypto_memneq(const void *a, const void *b, size_t size)
{
        return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
}

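/*
 * Illustrative sketch: constant-time tag comparison during MAC or AEAD
 * verification (hypothetical names).
 */
#if 0
static int example_verify_tag(const u8 *calc, const u8 *given,
                              unsigned int len)
{
        return crypto_memneq(calc, given, len) ? -EBADMSG : 0;
}
#endif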
int crypto_register_notifier(struct notifier_block *nb);
int crypto_unregister_notifier(struct notifier_block *nb);

/* Crypto notification events. */
enum {
        CRYPTO_MSG_ALG_REQUEST,
        CRYPTO_MSG_ALG_REGISTER,
        CRYPTO_MSG_ALG_LOADED,
};

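/*
 * Illustrative sketch of listening for the crypto notification events above
 * (hypothetical names; needs <linux/notifier.h>).
 */
#if 0
static int example_event(struct notifier_block *nb, unsigned long event,
                         void *data)
{
        if (event == CRYPTO_MSG_ALG_LOADED)
                pr_debug("an algorithm finished loading\n");
        return NOTIFY_OK;
}

static struct notifier_block example_nb = {
        .notifier_call = example_event,
};

/* registered with crypto_register_notifier(&example_nb); */
#endif
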
#endif  /* _CRYPTO_ALGAPI_H */