linux/crypto/aegis128-neon-inner.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2019 Linaro, Ltd. <ard.biesheuvel@linaro.org>
 */
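
/*
 * Inner transforms of the AEGIS-128 AEAD, written with NEON intrinsics so
 * the same code builds for both 32-bit ARM and arm64.
 */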

#ifdef CONFIG_ARM64
#include <asm/neon-intrinsics.h>

#define AES_ROUND       "aese %0.16b, %1.16b \n\t aesmc %0.16b, %0.16b"
#else
#include <arm_neon.h>

#define AES_ROUND       "aese.8 %q0, %q1 \n\t aesmc.8 %q0, %q0"
#endif

#define AEGIS_BLOCK_SIZE        16

#include <stddef.h>

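/*
 * Nonzero when the CPU implements the ARMv8 AES instructions; when it is
 * zero, aegis_aes_round() falls back to a plain-NEON, table-based AES round.
 */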
extern int aegis128_have_aes_insn;

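/*
 * No kernel headers are included here, so the string helpers used below are
 * declared by hand.
 */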
void *memcpy(void *dest, const void *src, size_t n);
void *memset(void *s, int c, size_t n);

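/* The 640-bit AEGIS-128 state: five 128-bit words S0..S4 */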
struct aegis128_state {
        uint8x16_t v[5];
};

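/*
 * The 256-byte AES forward S-box, used by the plain-NEON fallback in
 * aegis_aes_round() and preloaded into v16-v31 by preload_sbox().
 */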
extern const uint8_t crypto_aes_sbox[];

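/* Move the five 128-bit state words between memory and NEON registers. */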
static struct aegis128_state aegis128_load_state_neon(const void *state)
{
        return (struct aegis128_state){ {
                vld1q_u8(state),
                vld1q_u8(state + 16),
                vld1q_u8(state + 32),
                vld1q_u8(state + 48),
                vld1q_u8(state + 64)
        } };
}

static void aegis128_save_state_neon(struct aegis128_state st, void *state)
{
        vst1q_u8(state, st.v[0]);
        vst1q_u8(state + 16, st.v[1]);
        vst1q_u8(state + 32, st.v[2]);
        vst1q_u8(state + 48, st.v[3]);
        vst1q_u8(state + 64, st.v[4]);
}

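/*
 * Compute one keyless AES encryption round (SubBytes, ShiftRows, MixColumns)
 * on the 16 bytes of w; callers XOR in whatever "round key" they need.
 */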
static inline __attribute__((always_inline))
uint8x16_t aegis_aes_round(uint8x16_t w)
{
        uint8x16_t z = {};

#ifdef CONFIG_ARM64
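        /*
         * If the CPU lacks the ARMv8 AES instructions, emulate the round
         * with plain NEON: TBL-based ShiftRows/SubBytes and an explicit
         * MixColumns.
         */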
        if (!__builtin_expect(aegis128_have_aes_insn, 1)) {
                static const uint8_t shift_rows[] = {
                        0x0, 0x5, 0xa, 0xf, 0x4, 0x9, 0xe, 0x3,
                        0x8, 0xd, 0x2, 0x7, 0xc, 0x1, 0x6, 0xb,
                };
                static const uint8_t ror32by8[] = {
                        0x1, 0x2, 0x3, 0x0, 0x5, 0x6, 0x7, 0x4,
                        0x9, 0xa, 0xb, 0x8, 0xd, 0xe, 0xf, 0xc,
                };
                uint8x16_t v;

                // shift rows
                w = vqtbl1q_u8(w, vld1q_u8(shift_rows));

                // sub bytes
#ifndef CONFIG_CC_IS_GCC
                v = vqtbl4q_u8(vld1q_u8_x4(crypto_aes_sbox), w);
                v = vqtbx4q_u8(v, vld1q_u8_x4(crypto_aes_sbox + 0x40), w - 0x40);
                v = vqtbx4q_u8(v, vld1q_u8_x4(crypto_aes_sbox + 0x80), w - 0x80);
                v = vqtbx4q_u8(v, vld1q_u8_x4(crypto_aes_sbox + 0xc0), w - 0xc0);
#else
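                /*
                 * When building with GCC, do the S-box lookup with TBL/TBX
                 * on the fixed registers v16-v31, which preload_sbox() must
                 * have filled beforehand (the vld1q_u8_x4() based lookup
                 * above is not available on all GCC versions).
                 */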
                asm("tbl %0.16b, {v16.16b-v19.16b}, %1.16b" : "=w"(v) : "w"(w));
                w -= 0x40;
                asm("tbx %0.16b, {v20.16b-v23.16b}, %1.16b" : "+w"(v) : "w"(w));
                w -= 0x40;
                asm("tbx %0.16b, {v24.16b-v27.16b}, %1.16b" : "+w"(v) : "w"(w));
                w -= 0x40;
                asm("tbx %0.16b, {v28.16b-v31.16b}, %1.16b" : "+w"(v) : "w"(w));
#endif

                // mix columns
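                // xtime(v): double each byte in GF(2^8) by shifting left and
                // conditionally xoring in the AES reduction polynomial 0x1b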
                w = (v << 1) ^ (uint8x16_t)(((int8x16_t)v >> 7) & 0x1b);
                w ^= (uint8x16_t)vrev32q_u16((uint16x8_t)v);
                w ^= vqtbl1q_u8(v ^ w, vld1q_u8(ror32by8));

                return w;
        }
#endif

        /*
         * We use inline asm here instead of the vaeseq_u8/vaesmcq_u8 intrinsics
         * to force the compiler to issue the aese/aesmc instructions in pairs.
         * This is much faster on many cores, where the instruction pair can
         * execute in a single cycle.
         */
        asm(AES_ROUND : "+w"(w) : "w"(z));
        return w;
}

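/*
 * The AEGIS-128 state update: S'[i] = AESRound(S[i-1]) ^ S[i] for i = 1..4,
 * and S'[0] = AESRound(S[4]) ^ S[0] ^ m, where m is the block being absorbed.
 */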
static inline __attribute__((always_inline))
struct aegis128_state aegis128_update_neon(struct aegis128_state st,
                                           uint8x16_t m)
{
        m       ^= aegis_aes_round(st.v[4]);
        st.v[4] ^= aegis_aes_round(st.v[3]);
        st.v[3] ^= aegis_aes_round(st.v[2]);
        st.v[2] ^= aegis_aes_round(st.v[1]);
        st.v[1] ^= aegis_aes_round(st.v[0]);
        st.v[0] ^= m;

        return st;
}

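/*
 * Preload the AES S-box into NEON registers v16-v31 for the GCC fallback in
 * aegis_aes_round(). This is a no-op unless we are on arm64, building with
 * GCC, and the CPU lacks the AES instructions.
 */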
static inline __attribute__((always_inline))
void preload_sbox(void)
{
        if (!IS_ENABLED(CONFIG_ARM64) ||
            !IS_ENABLED(CONFIG_CC_IS_GCC) ||
            __builtin_expect(aegis128_have_aes_insn, 1))
                return;

        asm("ld1        {v16.16b-v19.16b}, [%0], #64    \n\t"
            "ld1        {v20.16b-v23.16b}, [%0], #64    \n\t"
            "ld1        {v24.16b-v27.16b}, [%0], #64    \n\t"
            "ld1        {v28.16b-v31.16b}, [%0]         \n\t"
            :: "r"(crypto_aes_sbox));
}

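/*
 * Initialize the state from the key and IV: S0 = key ^ iv, S1 = const1,
 * S2 = const0, S3 = key ^ const0, S4 = key ^ const1, followed by ten update
 * rounds that absorb the key and key ^ iv alternately.
 */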
void crypto_aegis128_init_neon(void *state, const void *key, const void *iv)
{
        static const uint8_t const0[] = {
                0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x08, 0x0d,
                0x15, 0x22, 0x37, 0x59, 0x90, 0xe9, 0x79, 0x62,
        };
        static const uint8_t const1[] = {
                0xdb, 0x3d, 0x18, 0x55, 0x6d, 0xc2, 0x2f, 0xf1,
                0x20, 0x11, 0x31, 0x42, 0x73, 0xb5, 0x28, 0xdd,
        };
        uint8x16_t k = vld1q_u8(key);
        uint8x16_t kiv = k ^ vld1q_u8(iv);
        struct aegis128_state st = {{
                kiv,
                vld1q_u8(const1),
                vld1q_u8(const0),
                k ^ vld1q_u8(const0),
                k ^ vld1q_u8(const1),
        }};
        int i;

        preload_sbox();

        for (i = 0; i < 5; i++) {
                st = aegis128_update_neon(st, k);
                st = aegis128_update_neon(st, kiv);
        }
        aegis128_save_state_neon(st, state);
}

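/* Absorb one 16-byte block (e.g. associated data) into the state. */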
void crypto_aegis128_update_neon(void *state, const void *msg)
{
        struct aegis128_state st = aegis128_load_state_neon(state);

        preload_sbox();

        st = aegis128_update_neon(st, vld1q_u8(msg));

        aegis128_save_state_neon(st, state);
}

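/*
 * Encrypt a chunk of the message: for each 16-byte block, the keystream is
 * z = S1 ^ (S2 & S3) ^ S4, the ciphertext is plaintext ^ z, and the plaintext
 * block is absorbed into the state. A trailing partial block is zero-padded
 * into a stack buffer before being processed, and only 'size' bytes of the
 * result are copied out.
 */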
void crypto_aegis128_encrypt_chunk_neon(void *state, void *dst, const void *src,
                                        unsigned int size)
{
        struct aegis128_state st = aegis128_load_state_neon(state);
        uint8x16_t msg;

        preload_sbox();

        while (size >= AEGIS_BLOCK_SIZE) {
                uint8x16_t s = st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4];

                msg = vld1q_u8(src);
                st = aegis128_update_neon(st, msg);
                vst1q_u8(dst, msg ^ s);

                size -= AEGIS_BLOCK_SIZE;
                src += AEGIS_BLOCK_SIZE;
                dst += AEGIS_BLOCK_SIZE;
        }

        if (size > 0) {
                uint8x16_t s = st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4];
                uint8_t buf[AEGIS_BLOCK_SIZE] = {};

                memcpy(buf, src, size);
                msg = vld1q_u8(buf);
                st = aegis128_update_neon(st, msg);
                vst1q_u8(buf, msg ^ s);
                memcpy(dst, buf, size);
        }

        aegis128_save_state_neon(st, state);
}

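/*
 * Decrypt a chunk of the message: the plaintext is ciphertext ^ z with the
 * same keystream as above, and the recovered plaintext is what gets absorbed
 * into the state.
 */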
void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src,
                                        unsigned int size)
{
        struct aegis128_state st = aegis128_load_state_neon(state);
        uint8x16_t msg;

        preload_sbox();

        while (size >= AEGIS_BLOCK_SIZE) {
                msg = vld1q_u8(src) ^ st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4];
                st = aegis128_update_neon(st, msg);
                vst1q_u8(dst, msg);

                size -= AEGIS_BLOCK_SIZE;
                src += AEGIS_BLOCK_SIZE;
                dst += AEGIS_BLOCK_SIZE;
        }

        if (size > 0) {
                uint8x16_t s = st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4];
                uint8_t buf[AEGIS_BLOCK_SIZE];

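                /*
                 * Prefill the buffer with the keystream so that the bytes
                 * beyond 'size' decrypt to zero, matching the zero padding
                 * applied to a partial block on the encrypt side.
                 */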
                vst1q_u8(buf, s);
                memcpy(buf, src, size);
                msg = vld1q_u8(buf) ^ s;
                vst1q_u8(buf, msg);
                memcpy(dst, buf, size);

                st = aegis128_update_neon(st, msg);
        }

        aegis128_save_state_neon(st, state);
}

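/*
 * Finalize: XOR the bit lengths of the associated data and the message into
 * S3, absorb the result with seven state updates, then XOR all five state
 * words into the tag passed in via tag_xor.
 */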
void crypto_aegis128_final_neon(void *state, void *tag_xor, uint64_t assoclen,
                                uint64_t cryptlen)
{
        struct aegis128_state st = aegis128_load_state_neon(state);
        uint8x16_t v;
        int i;

        preload_sbox();

        v = st.v[3] ^ (uint8x16_t)vcombine_u64(vmov_n_u64(8 * assoclen),
                                               vmov_n_u64(8 * cryptlen));

        for (i = 0; i < 7; i++)
                st = aegis128_update_neon(st, v);

        v = vld1q_u8(tag_xor);
        v ^= st.v[0] ^ st.v[1] ^ st.v[2] ^ st.v[3] ^ st.v[4];
        vst1q_u8(tag_xor, v);
}