/*
 * Twofish Cipher 8-way parallel algorithm (AVX/x86_64)
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *     <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 *
 */

#include <linux/linkage.h>
#include <asm/frame.h>
#include "glue_helper-asm-avx.S"

.file "twofish-avx-x86_64-asm_64.S"

.data
.align 16

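/*
 * vpshufb mask that reverses all 16 bytes of an xmm register (a 128-bit
 * byte swap); the load_ctr_8way invocation below uses it to byte-swap
 * the 128-bit counter while expanding it into eight consecutive counter
 * blocks.
 */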
.Lbswap128_mask:
        .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
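
/*
 * Constants for multiplying an XTS tweak by α in GF(2¹²⁸): shift the
 * 128-bit tweak left by one bit and, if a carry falls out of the top
 * bit, xor the low byte with 0x87, the reduction term of the polynomial
 * x¹²⁸ + x⁷ + x² + x + 1.  load_xts_8way below uses this mask to derive
 * the eight consecutive per-block tweaks.
 */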
.Lxts_gf128mul_and_shl1_mask:
        .byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0

.text

/* structure of crypto context */
#define s0      0
#define s1      1024
#define s2      2048
#define s3      3072
#define w       4096
#define k       4128

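/*
 * These offsets are a hand-laid-out view of the expanded key.  For
 * cross-checking, they match the following C layout (a sketch mirroring
 * struct twofish_ctx from <crypto/twofish.h>):
 *
 *      struct twofish_ctx {
 *              u32 s[4][256];  // s0..s3: four 1024-byte S-box tables
 *              u32 w[8];       // whitening subkeys, offset 4 * 4 * 256 = 4096
 *              u32 k[32];      // round subkeys, offset 4096 + 8 * 4 = 4128
 *      };
 */
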
/**********************************************************************
  8-way AVX twofish
 **********************************************************************/
#define CTX %rdi

#define RA1 %xmm0
#define RB1 %xmm1
#define RC1 %xmm2
#define RD1 %xmm3

#define RA2 %xmm4
#define RB2 %xmm5
#define RC2 %xmm6
#define RD2 %xmm7

#define RX0 %xmm8
#define RY0 %xmm9

#define RX1 %xmm10
#define RY1 %xmm11

#define RK1 %xmm12
#define RK2 %xmm13

#define RT %xmm14
#define RR %xmm15

#define RID1  %rbp
#define RID1d %ebp
#define RID2  %rsi
#define RID2d %esi

#define RGI1   %rdx
#define RGI1bl %dl
#define RGI1bh %dh
#define RGI2   %rcx
#define RGI2bl %cl
#define RGI2bh %ch

#define RGI3   %rax
#define RGI3bl %al
#define RGI3bh %ah
#define RGI4   %rbx
#define RGI4bl %bl
#define RGI4bh %bh

#define RGS1  %r8
#define RGS1d %r8d
#define RGS2  %r9
#define RGS2d %r9d
#define RGS3  %r10
#define RGS3d %r10d


#define lookup_32bit(t0, t1, t2, t3, src, dst, interleave_op, il_reg) \
        movzbl          src ## bl,        RID1d;     \
        movzbl          src ## bh,        RID2d;     \
        shrq $16,       src;                         \
        movl            t0(CTX, RID1, 4), dst ## d;  \
        movl            t1(CTX, RID2, 4), RID2d;     \
        movzbl          src ## bl,        RID1d;     \
        xorl            RID2d,            dst ## d;  \
        movzbl          src ## bh,        RID2d;     \
        interleave_op(il_reg);                       \
        xorl            t2(CTX, RID1, 4), dst ## d;  \
        xorl            t3(CTX, RID2, 4), dst ## d;

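/*
 * In C terms, one lookup_32bit() expansion computes, for the low 32-bit
 * word of src (a sketch; t0..t3 select four of the S-box tables above):
 *
 *      dst = t0[src & 0xff] ^ t1[(src >> 8) & 0xff] ^
 *            t2[(src >> 16) & 0xff] ^ t3[(src >> 24) & 0xff];
 *
 * i.e. one evaluation of the key-dependent g() function, with src shifted
 * right as a side effect.  interleave_op() lets the caller move the next
 * input word into place while the table loads are still in flight.
 */
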
#define dummy(d) /* do nothing */

#define shr_next(reg) \
        shrq $16,       reg;

#define G(gi1, gi2, x, t0, t1, t2, t3) \
        lookup_32bit(t0, t1, t2, t3, ##gi1, RGS1, shr_next, ##gi1);  \
        lookup_32bit(t0, t1, t2, t3, ##gi2, RGS3, shr_next, ##gi2);  \
        \
        lookup_32bit(t0, t1, t2, t3, ##gi1, RGS2, dummy, none);      \
        shlq $32,       RGS2;                                        \
        orq             RGS1, RGS2;                                  \
        lookup_32bit(t0, t1, t2, t3, ##gi2, RGS1, dummy, none);      \
        shlq $32,       RGS1;                                        \
        orq             RGS1, RGS3;

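/*
 * G() applies g() to all four 32-bit words of the gi1:gi2 register pair,
 * packing the two results for gi1 into RGS2 and the two for gi2 into
 * RGS3.  Note the table-rotation trick: the bytes of rol32(w, 8) are the
 * bytes of w shifted down one position, so calling G() with the table
 * order (s1, s2, s3, s0) yields g(rol32(w, 8)) without an actual rotate;
 * round_head_2() below relies on this for the y words.
 */
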
#define round_head_2(a, b, x1, y1, x2, y2) \
        vmovq           b ## 1, RGI3;           \
        vpextrq $1,     b ## 1, RGI4;           \
        \
        G(RGI1, RGI2, x1, s0, s1, s2, s3);      \
        vmovq           a ## 2, RGI1;           \
        vpextrq $1,     a ## 2, RGI2;           \
        vmovq           RGS2, x1;               \
        vpinsrq $1,     RGS3, x1, x1;           \
        \
        G(RGI3, RGI4, y1, s1, s2, s3, s0);      \
        vmovq           b ## 2, RGI3;           \
        vpextrq $1,     b ## 2, RGI4;           \
        vmovq           RGS2, y1;               \
        vpinsrq $1,     RGS3, y1, y1;           \
        \
        G(RGI1, RGI2, x2, s0, s1, s2, s3);      \
        vmovq           RGS2, x2;               \
        vpinsrq $1,     RGS3, x2, x2;           \
        \
        G(RGI3, RGI4, y2, s1, s2, s3, s0);      \
        vmovq           RGS2, y2;               \
        vpinsrq $1,     RGS3, y2, y2;

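/*
 * round_head_2() computes the g() inputs of one round for two block
 * groups at once: it leaves x1/x2 = g(a words) and y1/y2 =
 * g(rol32(b words, 8)) in xmm registers, interleaving the scalar table
 * lookups of one group with the vmovq/vpextrq/vpinsrq traffic of the
 * next so scalar and vector work can overlap.
 */
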
#define encround_tail(a, b, c, d, x, y, prerotate) \
        vpaddd                  x, y,   x; \
        vpaddd                  x, RK1, RT;\
        prerotate(b);                      \
        vpxor                   RT, c,  c; \
        vpaddd                  y, x,   y; \
        vpaddd                  y, RK2, y; \
        vpsrld $1,              c, RT;     \
        vpslld $(32 - 1),       c, c;      \
        vpor                    c, RT,  c; \
        vpxor                   d, y,   d; \

#define decround_tail(a, b, c, d, x, y, prerotate) \
        vpaddd                  x, y,   x; \
        vpaddd                  x, RK1, RT;\
        prerotate(a);                      \
        vpxor                   RT, c,  c; \
        vpaddd                  y, x,   y; \
        vpaddd                  y, RK2, y; \
        vpxor                   d, y,   d; \
        vpsrld $1,              d, y;      \
        vpslld $(32 - 1),       d, d;      \
        vpor                    d, y,   d; \

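/*
 * For reference, the textbook encryption round on (a, b, c, d), with x
 * and y as produced by round_head_2() and k[] as in the context layout
 * above, is roughly:
 *
 *      x += y;                         // g(a) + g(rol32(b, 8))
 *      y += x;                         // g(a) + 2 * g(rol32(b, 8))
 *      c = ror32(c ^ (x + k[2r]), 1);
 *      d = rol32(d, 1) ^ (y + k[2r + 1]);
 *
 * The tails above implement this with the rol32-by-1 hoisted out:
 * prerotate() rotates a register one round before it is consumed as the
 * rotated input (d when encrypting, c when decrypting), and RD1/RD2 (or
 * RA1/RA2) are rotated once at function entry.  The rotate itself takes
 * shift/shift/or because AVX has no vector rotate instruction.
 */
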
#define rotate_1l(x) \
        vpslld $1,              x, RR;     \
        vpsrld $(32 - 1),       x, x;      \
        vpor                    x, RR,  x;

#define preload_rgi(c) \
        vmovq                   c, RGI1; \
        vpextrq $1,             c, RGI2;

#define encrypt_round(n, a, b, c, d, preload, prerotate) \
        vbroadcastss (k+4*(2*(n)))(CTX),   RK1;                  \
        vbroadcastss (k+4*(2*(n)+1))(CTX), RK2;                  \
        round_head_2(a, b, RX0, RY0, RX1, RY1);                  \
        encround_tail(a ## 1, b ## 1, c ## 1, d ## 1, RX0, RY0, prerotate); \
        preload(c ## 1);                                         \
        encround_tail(a ## 2, b ## 2, c ## 2, d ## 2, RX1, RY1, prerotate);

#define decrypt_round(n, a, b, c, d, preload, prerotate) \
        vbroadcastss (k+4*(2*(n)))(CTX),   RK1;                  \
        vbroadcastss (k+4*(2*(n)+1))(CTX), RK2;                  \
        round_head_2(a, b, RX0, RY0, RX1, RY1);                  \
        decround_tail(a ## 1, b ## 1, c ## 1, d ## 1, RX0, RY0, prerotate); \
        preload(c ## 1);                                         \
        decround_tail(a ## 2, b ## 2, c ## 2, d ## 2, RX1, RY1, prerotate);

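/*
 * vbroadcastss replicates each 32-bit subkey into all four dword lanes,
 * so a single xmm register keys four blocks at once.  k[i] here is
 * K[i + 8] in the Twofish paper's numbering (w[] holds K[0]..K[7]);
 * round n therefore loads k[2n] and k[2n+1].
 */
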
#define encrypt_cycle(n) \
        encrypt_round((2*n), RA, RB, RC, RD, preload_rgi, rotate_1l); \
        encrypt_round(((2*n) + 1), RC, RD, RA, RB, preload_rgi, rotate_1l);

#define encrypt_cycle_last(n) \
        encrypt_round((2*n), RA, RB, RC, RD, preload_rgi, rotate_1l); \
        encrypt_round(((2*n) + 1), RC, RD, RA, RB, dummy, dummy);

#define decrypt_cycle(n) \
        decrypt_round(((2*n) + 1), RC, RD, RA, RB, preload_rgi, rotate_1l); \
        decrypt_round((2*n), RA, RB, RC, RD, preload_rgi, rotate_1l);

#define decrypt_cycle_last(n) \
        decrypt_round(((2*n) + 1), RC, RD, RA, RB, preload_rgi, rotate_1l); \
        decrypt_round((2*n), RA, RB, RC, RD, dummy, dummy);
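
/*
 * A cycle is two rounds with the register roles swapped for the second
 * round, so the Feistel half-swap costs no instructions; eight cycles
 * make up the full 16 rounds.  The _last variants pass dummy for
 * preload/prerotate, skipping work only a following round would consume.
 */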

#define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
        vpunpckldq              x1, x0, t0; \
        vpunpckhdq              x1, x0, t2; \
        vpunpckldq              x3, x2, t1; \
        vpunpckhdq              x3, x2, x3; \
        \
        vpunpcklqdq             t1, t0, x0; \
        vpunpckhqdq             t1, t0, x1; \
        vpunpcklqdq             x3, t2, x2; \
        vpunpckhqdq             x3, t2, x3;

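/*
 * Standard 4x4 dword transpose (unpack dwords, then qwords).  On entry,
 * x0..x3 each hold one 128-bit block; on exit, x0 holds word 0 of all
 * four blocks, x1 word 1, and so on, which is the word-sliced layout
 * the round macros above operate on.
 */
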
#define inpack_blocks(x0, x1, x2, x3, wkey, t0, t1, t2) \
        vpxor           x0, wkey, x0; \
        vpxor           x1, wkey, x1; \
        vpxor           x2, wkey, x2; \
        vpxor           x3, wkey, x3; \
        \
        transpose_4x4(x0, x1, x2, x3, t0, t1, t2)

#define outunpack_blocks(x0, x1, x2, x3, wkey, t0, t1, t2) \
        transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
        \
        vpxor           x0, wkey, x0; \
        vpxor           x1, wkey, x1; \
        vpxor           x2, wkey, x2; \
        vpxor           x3, wkey, x3;
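
/*
 * Whitening is applied while the registers are still in per-block
 * layout (before the transpose on input, after it on output), so the
 * four dword lanes of wkey line up with the four words of each block.
 */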

.align 8
__twofish_enc_blk8:
        /* input:
         *      %rdi: ctx, CTX
         *      RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks
         * output:
         *      RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2: encrypted blocks
         */

        vmovdqu w(CTX), RK1;

        pushq %rbp;
        pushq %rbx;
        pushq %rcx;
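        /* The scalar lookups clobber %rbp (RID1) and %rbx (RGI4), both
         * callee-saved, hence the pushes; %rcx (RGI2) is caller-saved
         * by the C ABI but is preserved here as well. */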

        inpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
        preload_rgi(RA1);
        rotate_1l(RD1);
        inpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
        rotate_1l(RD2);

        encrypt_cycle(0);
        encrypt_cycle(1);
        encrypt_cycle(2);
        encrypt_cycle(3);
        encrypt_cycle(4);
        encrypt_cycle(5);
        encrypt_cycle(6);
        encrypt_cycle_last(7);

        vmovdqu (w+4*4)(CTX), RK1;

        popq %rcx;
        popq %rbx;
        popq %rbp;

        outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
        outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);

        ret;
ENDPROC(__twofish_enc_blk8)

.align 8
__twofish_dec_blk8:
        /* input:
         *      %rdi: ctx, CTX
         *      RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2: encrypted blocks
         * output:
         *      RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: decrypted blocks
         */

        vmovdqu (w+4*4)(CTX), RK1;

        pushq %rbp;
        pushq %rbx;

        inpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
        preload_rgi(RC1);
        rotate_1l(RA1);
        inpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
        rotate_1l(RA2);

        decrypt_cycle(7);
        decrypt_cycle(6);
        decrypt_cycle(5);
        decrypt_cycle(4);
        decrypt_cycle(3);
        decrypt_cycle(2);
        decrypt_cycle(1);
        decrypt_cycle_last(0);

        vmovdqu (w)(CTX), RK1;

        popq %rbx;
        popq %rbp;

        outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
        outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);

        ret;
ENDPROC(__twofish_dec_blk8)

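/*
 * The entry points below are called from C and follow the SysV x86-64
 * convention.  As a sketch of the caller's view, the glue code in
 * twofish_avx_glue.c declares them along these lines:
 *
 *      asmlinkage void twofish_ecb_enc_8way(struct twofish_ctx *ctx,
 *                                           u8 *dst, const u8 *src);
 *
 * ecb_dec and cbc_dec take the same three arguments; ctr and xts
 * additionally take the 128-bit iv in %rcx.
 */
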
ENTRY(twofish_ecb_enc_8way)
        /* input:
         *      %rdi: ctx, CTX
         *      %rsi: dst
         *      %rdx: src
         */
        FRAME_BEGIN

        movq %rsi, %r11;

        load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

        call __twofish_enc_blk8;

        store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);

        FRAME_END
        ret;
ENDPROC(twofish_ecb_enc_8way)

ENTRY(twofish_ecb_dec_8way)
        /* input:
         *      %rdi: ctx, CTX
         *      %rsi: dst
         *      %rdx: src
         */
        FRAME_BEGIN

        movq %rsi, %r11;

        load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);

        call __twofish_dec_blk8;

        store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

        FRAME_END
        ret;
ENDPROC(twofish_ecb_dec_8way)

ENTRY(twofish_cbc_dec_8way)
        /* input:
         *      %rdi: ctx, CTX
         *      %rsi: dst
         *      %rdx: src
         */
        FRAME_BEGIN

        pushq %r12;

        movq %rsi, %r11;
        movq %rdx, %r12;

        load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);

        call __twofish_dec_blk8;

        store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

        popq %r12;

        FRAME_END
        ret;
ENDPROC(twofish_cbc_dec_8way)

ENTRY(twofish_ctr_8way)
        /* input:
         *      %rdi: ctx, CTX
         *      %rsi: dst
         *      %rdx: src
         *      %rcx: iv (little endian, 128bit)
         */
        FRAME_BEGIN

        pushq %r12;

        movq %rsi, %r11;
        movq %rdx, %r12;

        load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
                      RD2, RX0, RX1, RY0);

        call __twofish_enc_blk8;

        store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);

        popq %r12;

        FRAME_END
        ret;
ENDPROC(twofish_ctr_8way)

ENTRY(twofish_xts_enc_8way)
        /* input:
         *      %rdi: ctx, CTX
         *      %rsi: dst
         *      %rdx: src
         *      %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
         */
        FRAME_BEGIN

        movq %rsi, %r11;

        /* regs <= src, dst <= IVs, regs <= regs xor IVs */
        load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2,
                      RX0, RX1, RY0, .Lxts_gf128mul_and_shl1_mask);

        call __twofish_enc_blk8;

        /* dst <= regs xor IVs(in dst) */
        store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);

        FRAME_END
        ret;
ENDPROC(twofish_xts_enc_8way)

ENTRY(twofish_xts_dec_8way)
        /* input:
         *      %rdi: ctx, CTX
         *      %rsi: dst
         *      %rdx: src
         *      %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
         */
        FRAME_BEGIN

        movq %rsi, %r11;

        /* regs <= src, dst <= IVs, regs <= regs xor IVs */
        load_xts_8way(%rcx, %rdx, %rsi, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2,
                      RX0, RX1, RY0, .Lxts_gf128mul_and_shl1_mask);

        call __twofish_dec_blk8;

        /* dst <= regs xor IVs(in dst) */
        store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

        FRAME_END
        ret;
ENDPROC(twofish_xts_dec_8way)