linux/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
/*
 * Cast5 Cipher 16-way parallel algorithm (AVX/x86_64)
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *     <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * Copyright © 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 *
 */

#include <linux/linkage.h>
#include <asm/frame.h>

.file "cast5-avx-x86_64-asm_64.S"

.extern cast_s1
.extern cast_s2
.extern cast_s3
.extern cast_s4

/* structure of crypto context */
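/* The km/kr/rr offsets below match struct cast5_ctx: 16 u32 masking
 * keys Km, followed by 16 u8 rotation keys Kr, followed by the int
 * 'rr' flag, which is non-zero for short keys (<= 80 bits) that use
 * the reduced 12-round schedule of RFC 2144.
 */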
#define km      0
#define kr      (16*4)
#define rr      ((16*4)+16)

/* s-boxes */
#define s1      cast_s1
#define s2      cast_s2
#define s3      cast_s3
#define s4      cast_s4

/**********************************************************************
  16-way AVX cast5
 **********************************************************************/
#define CTX %rdi

#define RL1 %xmm0
#define RR1 %xmm1
#define RL2 %xmm2
#define RR2 %xmm3
#define RL3 %xmm4
#define RR3 %xmm5
#define RL4 %xmm6
#define RR4 %xmm7

#define RX %xmm8

#define RKM  %xmm9
#define RKR  %xmm10
#define RKRF %xmm11
#define RKRR %xmm12

#define R32  %xmm13
#define R1ST %xmm14

#define RTMP %xmm15

#define RID1  %rbp
#define RID1d %ebp
#define RID2  %rsi
#define RID2d %esi

#define RGI1   %rdx
#define RGI1bl %dl
#define RGI1bh %dh
#define RGI2   %rcx
#define RGI2bl %cl
#define RGI2bh %ch

#define RGI3   %rax
#define RGI3bl %al
#define RGI3bh %ah
#define RGI4   %rbx
#define RGI4bl %bl
#define RGI4bh %bh

#define RFS1  %r8
#define RFS1d %r8d
#define RFS2  %r9
#define RFS2d %r9d
#define RFS3  %r10
#define RFS3d %r10d


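/*
 * lookup_32bit: do the four CAST5 S-box lookups for one 32-bit value held
 * in a general-purpose register.  The current bl/bh byte pair indexes
 * s1/s2 (resp. s3/s4 after the 16-bit shift exposes the upper bytes).
 * op1/op2/op3 pick how s2/s3/s4 are folded into dst (xor/sub/add,
 * depending on the round function type), and interleave_op lets the
 * caller slot the shift that prepares il_reg for the next lookup in
 * between the last two table loads.
 */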
#define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
        movzbl          src ## bh,     RID1d;    \
        movzbl          src ## bl,     RID2d;    \
        shrq $16,       src;                     \
        movl            s1(, RID1, 4), dst ## d; \
        op1             s2(, RID2, 4), dst ## d; \
        movzbl          src ## bh,     RID1d;    \
        movzbl          src ## bl,     RID2d;    \
        interleave_op(il_reg);                   \
        op2             s3(, RID1, 4), dst ## d; \
        op3             s4(, RID2, 4), dst ## d;

#define dummy(d) /* do nothing */

#define shr_next(reg) \
        shrq $16,       reg;

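/*
 * F_head: first half of the CAST5 round function for one xmm register
 * (two 64-bit blocks).  op0 mixes the masking key in RKM with the input
 * (add/xor/sub depending on the round type), the result is rotated left
 * by the rotation key (RKRF holds the shift count, RKRR = 32 - count),
 * and the two 64-bit lanes are moved out to general-purpose registers
 * gi1/gi2 for the S-box lookups.
 */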
#define F_head(a, x, gi1, gi2, op0) \
        op0     a,      RKM,  x;                 \
        vpslld  RKRF,   x,    RTMP;              \
        vpsrld  RKRR,   x,    x;                 \
        vpor    RTMP,   x,    x;                 \
        \
        vmovq           x,    gi1;               \
        vpextrq $1,     x,    gi2;

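/*
 * F_tail: finish the round function.  Each 64-bit lane extracted by
 * F_head carries two 32-bit values; run lookup_32bit on all four of
 * them, pack the 32-bit results back into 64-bit lanes in RFS2/RFS3
 * and reassemble those into the xmm register x.
 */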
#define F_tail(a, x, gi1, gi2, op1, op2, op3) \
        lookup_32bit(##gi1, RFS1, op1, op2, op3, shr_next, ##gi1); \
        lookup_32bit(##gi2, RFS3, op1, op2, op3, shr_next, ##gi2); \
        \
        lookup_32bit(##gi1, RFS2, op1, op2, op3, dummy, none);     \
        shlq $32,       RFS2;                                      \
        orq             RFS1, RFS2;                                \
        lookup_32bit(##gi2, RFS1, op1, op2, op3, dummy, none);     \
        shlq $32,       RFS1;                                      \
        orq             RFS1, RFS3;                                \
        \
        vmovq           RFS2, x;                                   \
        vpinsrq $1,     RFS3, x, x;

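/*
 * F_2: apply the round function to two xmm registers of right halves
 * (b1, b2) at once and xor the results into the left halves a1/a2.
 * F1_2/F2_2/F3_2 are the three CAST5 round function types of RFC 2144;
 * they differ only in which of add/xor/sub is used to mix the key and
 * to combine the four S-box outputs.
 */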
#define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \
        F_head(b1, RX, RGI1, RGI2, op0);              \
        F_head(b2, RX, RGI3, RGI4, op0);              \
        \
        F_tail(b1, RX, RGI1, RGI2, op1, op2, op3);    \
        F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3);  \
        \
        vpxor           a1, RX,   a1;                 \
        vpxor           a2, RTMP, a2;

#define F1_2(a1, b1, a2, b2) \
        F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
#define F2_2(a1, b1, a2, b2) \
        F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
#define F3_2(a1, b1, a2, b2) \
        F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)

#define subround(a1, b1, a2, b2, f) \
        F ## f ## _2(a1, b1, a2, b2);

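/*
 * round: one CAST5 round over all 16 blocks.  Broadcast the masking key
 * Km[n], take the low five bits of the next rotation key byte in RKR as
 * the left-rotate count (R1ST holds the 0x1f mask), derive 32 - count in
 * RKRR, then shift RKR down a byte so the next round sees its own Kr.
 */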
#define round(l, r, n, f) \
        vbroadcastss    (km+(4*n))(CTX), RKM;        \
        vpand           R1ST,            RKR,  RKRF; \
        vpsubq          RKRF,            R32,  RKRR; \
        vpsrldq $1,     RKR,             RKR;        \
        subround(l ## 1, r ## 1, l ## 2, r ## 2, f); \
        subround(l ## 3, r ## 3, l ## 4, r ## 4, f);

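/*
 * All 16 rotation key bytes are cached in RKR and consumed one byte per
 * round via the vpsrldq above.  XORing with .L16_mask adds 16 (mod 32)
 * to each rotation amount, matching the 16-bit-rotated byte order in
 * which lookup_32bit indexes the S-boxes.  The decryption variant also
 * byte-reverses RKR so the rotation keys come out in order 15 .. 0.
 */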
#define enc_preload_rkr() \
        vbroadcastss    .L16_mask,                RKR;      \
        /* add 16-bit rotation to key rotations (mod 32) */ \
        vpxor           kr(CTX),                  RKR, RKR;

#define dec_preload_rkr() \
        vbroadcastss    .L16_mask,                RKR;      \
        /* add 16-bit rotation to key rotations (mod 32) */ \
        vpxor           kr(CTX),                  RKR, RKR; \
        vpshufb         .Lbswap128_mask,          RKR, RKR;

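/*
 * inpack_blocks: byte-swap each 32-bit word of the big-endian input to
 * host order and transpose, leaving the left halves of four blocks in
 * x0 and the matching right halves in x1.  outunpack_blocks undoes the
 * transpose and swaps back to big endian for the store.
 */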
#define transpose_2x4(x0, x1, t0, t1) \
        vpunpckldq              x1, x0, t0; \
        vpunpckhdq              x1, x0, t1; \
        \
        vpunpcklqdq             t1, t0, x0; \
        vpunpckhqdq             t1, t0, x1;

#define inpack_blocks(x0, x1, t0, t1, rmask) \
        vpshufb rmask,  x0,     x0; \
        vpshufb rmask,  x1,     x1; \
        \
        transpose_2x4(x0, x1, t0, t1)

#define outunpack_blocks(x0, x1, t0, t1, rmask) \
        transpose_2x4(x0, x1, t0, t1) \
        \
        vpshufb rmask,  x0, x0;           \
        vpshufb rmask,  x1, x1;

.data

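/*
 * Byte-shuffle masks for the endian conversions, plus the scalar
 * constants 16 (rotation bias), 32 (for computing 32 - Kr) and 0x1f
 * (five-bit rotation mask) used by the round machinery above.
 */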
.align 16
.Lbswap_mask:
        .byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lbswap128_mask:
        .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
.Lbswap_iv_mask:
        .byte 7, 6, 5, 4, 3, 2, 1, 0, 7, 6, 5, 4, 3, 2, 1, 0
.L16_mask:
        .byte 16, 16, 16, 16
.L32_mask:
        .byte 32, 0, 0, 0
.Lfirst_mask:
        .byte 0x1f, 0, 0, 0

.text

.align 16
__cast5_enc_blk16:
        /* input:
         *      %rdi: ctx, CTX
         *      RL1: blocks 1 and 2
         *      RR1: blocks 3 and 4
         *      RL2: blocks 5 and 6
         *      RR2: blocks 7 and 8
         *      RL3: blocks 9 and 10
         *      RR3: blocks 11 and 12
         *      RL4: blocks 13 and 14
         *      RR4: blocks 15 and 16
         * output:
         *      RL1: encrypted blocks 1 and 2
         *      RR1: encrypted blocks 3 and 4
         *      RL2: encrypted blocks 5 and 6
         *      RR2: encrypted blocks 7 and 8
         *      RL3: encrypted blocks 9 and 10
         *      RR3: encrypted blocks 11 and 12
         *      RL4: encrypted blocks 13 and 14
         *      RR4: encrypted blocks 15 and 16
         */

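        /* %rbp and %rbx serve as RID1/RGI4 in the S-box lookups and are
         * callee-saved, so preserve them across the rounds. */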
        pushq %rbp;
        pushq %rbx;

        vmovdqa .Lbswap_mask, RKM;
        vmovd .Lfirst_mask, R1ST;
        vmovd .L32_mask, R32;
        enc_preload_rkr();

        inpack_blocks(RL1, RR1, RTMP, RX, RKM);
        inpack_blocks(RL2, RR2, RTMP, RX, RKM);
        inpack_blocks(RL3, RR3, RTMP, RX, RKM);
        inpack_blocks(RL4, RR4, RTMP, RX, RKM);

        round(RL, RR, 0, 1);
        round(RR, RL, 1, 2);
        round(RL, RR, 2, 3);
        round(RR, RL, 3, 1);
        round(RL, RR, 4, 2);
        round(RR, RL, 5, 3);
        round(RL, RR, 6, 1);
        round(RR, RL, 7, 2);
        round(RL, RR, 8, 3);
        round(RR, RL, 9, 1);
        round(RL, RR, 10, 2);
        round(RR, RL, 11, 3);

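        /* ctx->rr is non-zero for keys of 80 bits or less, which per
         * RFC 2144 use only 12 rounds; skip rounds 12..15 in that case. */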
        movzbl rr(CTX), %eax;
        testl %eax, %eax;
        jnz .L__skip_enc;

        round(RL, RR, 12, 1);
        round(RR, RL, 13, 2);
        round(RL, RR, 14, 3);
        round(RR, RL, 15, 1);

.L__skip_enc:
        popq %rbx;
        popq %rbp;

        vmovdqa .Lbswap_mask, RKM;

        outunpack_blocks(RR1, RL1, RTMP, RX, RKM);
        outunpack_blocks(RR2, RL2, RTMP, RX, RKM);
        outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
        outunpack_blocks(RR4, RL4, RTMP, RX, RKM);

        ret;
ENDPROC(__cast5_enc_blk16)

.align 16
__cast5_dec_blk16:
        /* input:
         *      %rdi: ctx, CTX
         *      RL1: encrypted blocks 1 and 2
         *      RR1: encrypted blocks 3 and 4
         *      RL2: encrypted blocks 5 and 6
         *      RR2: encrypted blocks 7 and 8
         *      RL3: encrypted blocks 9 and 10
         *      RR3: encrypted blocks 11 and 12
         *      RL4: encrypted blocks 13 and 14
         *      RR4: encrypted blocks 15 and 16
         * output:
         *      RL1: decrypted blocks 1 and 2
         *      RR1: decrypted blocks 3 and 4
         *      RL2: decrypted blocks 5 and 6
         *      RR2: decrypted blocks 7 and 8
         *      RL3: decrypted blocks 9 and 10
         *      RR3: decrypted blocks 11 and 12
         *      RL4: decrypted blocks 13 and 14
         *      RR4: decrypted blocks 15 and 16
         */

        pushq %rbp;
        pushq %rbx;

        vmovdqa .Lbswap_mask, RKM;
        vmovd .Lfirst_mask, R1ST;
        vmovd .L32_mask, R32;
        dec_preload_rkr();

        inpack_blocks(RL1, RR1, RTMP, RX, RKM);
        inpack_blocks(RL2, RR2, RTMP, RX, RKM);
        inpack_blocks(RL3, RR3, RTMP, RX, RKM);
        inpack_blocks(RL4, RR4, RTMP, RX, RKM);

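        /* 12-round (short key) variant: skip rounds 15..12 and enter the
         * common tail at round 11. */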
        movzbl rr(CTX), %eax;
        testl %eax, %eax;
        jnz .L__skip_dec;

        round(RL, RR, 15, 1);
        round(RR, RL, 14, 3);
        round(RL, RR, 13, 2);
        round(RR, RL, 12, 1);

.L__dec_tail:
        round(RL, RR, 11, 3);
        round(RR, RL, 10, 2);
        round(RL, RR, 9, 1);
        round(RR, RL, 8, 3);
        round(RL, RR, 7, 2);
        round(RR, RL, 6, 1);
        round(RL, RR, 5, 3);
        round(RR, RL, 4, 2);
        round(RL, RR, 3, 1);
        round(RR, RL, 2, 3);
        round(RL, RR, 1, 2);
        round(RR, RL, 0, 1);

        vmovdqa .Lbswap_mask, RKM;
        popq %rbx;
        popq %rbp;

        outunpack_blocks(RR1, RL1, RTMP, RX, RKM);
        outunpack_blocks(RR2, RL2, RTMP, RX, RKM);
        outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
        outunpack_blocks(RR4, RL4, RTMP, RX, RKM);

        ret;

.L__skip_dec:
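        /* Drop the rotation key bytes of rounds 15..12 so the common
         * tail at .L__dec_tail starts with round 11's Kr. */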
        vpsrldq $4, RKR, RKR;
        jmp .L__dec_tail;
ENDPROC(__cast5_dec_blk16)

ENTRY(cast5_ecb_enc_16way)
        /* input:
         *      %rdi: ctx, CTX
         *      %rsi: dst
         *      %rdx: src
         */
        FRAME_BEGIN

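        /* %rsi doubles as RID2 inside the core, so stash dst in %r11. */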
        movq %rsi, %r11;

        vmovdqu (0*4*4)(%rdx), RL1;
        vmovdqu (1*4*4)(%rdx), RR1;
        vmovdqu (2*4*4)(%rdx), RL2;
        vmovdqu (3*4*4)(%rdx), RR2;
        vmovdqu (4*4*4)(%rdx), RL3;
        vmovdqu (5*4*4)(%rdx), RR3;
        vmovdqu (6*4*4)(%rdx), RL4;
        vmovdqu (7*4*4)(%rdx), RR4;

        call __cast5_enc_blk16;

        vmovdqu RR1, (0*4*4)(%r11);
        vmovdqu RL1, (1*4*4)(%r11);
        vmovdqu RR2, (2*4*4)(%r11);
        vmovdqu RL2, (3*4*4)(%r11);
        vmovdqu RR3, (4*4*4)(%r11);
        vmovdqu RL3, (5*4*4)(%r11);
        vmovdqu RR4, (6*4*4)(%r11);
        vmovdqu RL4, (7*4*4)(%r11);

        FRAME_END
        ret;
ENDPROC(cast5_ecb_enc_16way)

ENTRY(cast5_ecb_dec_16way)
        /* input:
         *      %rdi: ctx, CTX
         *      %rsi: dst
         *      %rdx: src
         */

        FRAME_BEGIN
        movq %rsi, %r11;

        vmovdqu (0*4*4)(%rdx), RL1;
        vmovdqu (1*4*4)(%rdx), RR1;
        vmovdqu (2*4*4)(%rdx), RL2;
        vmovdqu (3*4*4)(%rdx), RR2;
        vmovdqu (4*4*4)(%rdx), RL3;
        vmovdqu (5*4*4)(%rdx), RR3;
        vmovdqu (6*4*4)(%rdx), RL4;
        vmovdqu (7*4*4)(%rdx), RR4;

        call __cast5_dec_blk16;

        vmovdqu RR1, (0*4*4)(%r11);
        vmovdqu RL1, (1*4*4)(%r11);
        vmovdqu RR2, (2*4*4)(%r11);
        vmovdqu RL2, (3*4*4)(%r11);
        vmovdqu RR3, (4*4*4)(%r11);
        vmovdqu RL3, (5*4*4)(%r11);
        vmovdqu RR4, (6*4*4)(%r11);
        vmovdqu RL4, (7*4*4)(%r11);

        FRAME_END
        ret;
ENDPROC(cast5_ecb_dec_16way)

ENTRY(cast5_cbc_dec_16way)
        /* input:
         *      %rdi: ctx, CTX
         *      %rsi: dst
         *      %rdx: src
         */
        FRAME_BEGIN

        pushq %r12;

        movq %rsi, %r11;
        movq %rdx, %r12;

        vmovdqu (0*16)(%rdx), RL1;
        vmovdqu (1*16)(%rdx), RR1;
        vmovdqu (2*16)(%rdx), RL2;
        vmovdqu (3*16)(%rdx), RR2;
        vmovdqu (4*16)(%rdx), RL3;
        vmovdqu (5*16)(%rdx), RR3;
        vmovdqu (6*16)(%rdx), RL4;
        vmovdqu (7*16)(%rdx), RR4;

        call __cast5_dec_blk16;

        /* xor with src */
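        /* CBC: P[i] = D(C[i]) ^ C[i-1].  Block 1 is XORed with zero here;
         * its XOR with the IV is left to the caller.  The vpshufd moves
         * ciphertext block 1 into the high lane so it pairs with decrypted
         * block 2, and the remaining XORs read the preceding ciphertext
         * blocks at an 8-byte offset from src. */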
        vmovq (%r12), RX;
        vpshufd $0x4f, RX, RX;
        vpxor RX, RR1, RR1;
        vpxor 0*16+8(%r12), RL1, RL1;
        vpxor 1*16+8(%r12), RR2, RR2;
        vpxor 2*16+8(%r12), RL2, RL2;
        vpxor 3*16+8(%r12), RR3, RR3;
        vpxor 4*16+8(%r12), RL3, RL3;
        vpxor 5*16+8(%r12), RR4, RR4;
        vpxor 6*16+8(%r12), RL4, RL4;

        vmovdqu RR1, (0*16)(%r11);
        vmovdqu RL1, (1*16)(%r11);
        vmovdqu RR2, (2*16)(%r11);
        vmovdqu RL2, (3*16)(%r11);
        vmovdqu RR3, (4*16)(%r11);
        vmovdqu RL3, (5*16)(%r11);
        vmovdqu RR4, (6*16)(%r11);
        vmovdqu RL4, (7*16)(%r11);

        popq %r12;

        FRAME_END
        ret;
ENDPROC(cast5_cbc_dec_16way)

ENTRY(cast5_ctr_16way)
        /* input:
         *      %rdi: ctx, CTX
         *      %rsi: dst
         *      %rdx: src
         *      %rcx: iv (big endian, 64bit)
         */
        FRAME_BEGIN

        pushq %r12;

        movq %rsi, %r11;
        movq %rdx, %r12;

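        /* Counters are built as little-endian 64-bit pairs in RX.
         * RTMP = { -1, 0 } bumps a single half by one (via vpsubq), and
         * RKR = { -2, -2 } advances both counters by two per step; the
         * shuffle masks convert each pair back to big endian. */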
        vpcmpeqd RTMP, RTMP, RTMP;
        vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */

        vpcmpeqd RKR, RKR, RKR;
        vpaddq RKR, RKR, RKR; /* low: -2, high: -2 */
        vmovdqa .Lbswap_iv_mask, R1ST;
        vmovdqa .Lbswap128_mask, RKM;

        /* load IV and byteswap */
        vmovq (%rcx), RX;
        vpshufb R1ST, RX, RX;

        /* construct IVs */
        vpsubq RTMP, RX, RX;  /* le: IV1, IV0 */
        vpshufb RKM, RX, RL1; /* be: IV0, IV1 */
        vpsubq RKR, RX, RX;
        vpshufb RKM, RX, RR1; /* be: IV2, IV3 */
        vpsubq RKR, RX, RX;
        vpshufb RKM, RX, RL2; /* be: IV4, IV5 */
        vpsubq RKR, RX, RX;
        vpshufb RKM, RX, RR2; /* be: IV6, IV7 */
        vpsubq RKR, RX, RX;
        vpshufb RKM, RX, RL3; /* be: IV8, IV9 */
        vpsubq RKR, RX, RX;
        vpshufb RKM, RX, RR3; /* be: IV10, IV11 */
        vpsubq RKR, RX, RX;
        vpshufb RKM, RX, RL4; /* be: IV12, IV13 */
        vpsubq RKR, RX, RX;
        vpshufb RKM, RX, RR4; /* be: IV14, IV15 */

        /* store last IV */
        vpsubq RTMP, RX, RX; /* le: IV16, IV14 */
        vpshufb R1ST, RX, RX; /* be: IV16, IV16 */
        vmovq RX, (%rcx);

        call __cast5_enc_blk16;

        /* dst = src ^ encrypted counter blocks (the keystream) */
        vpxor (0*16)(%r12), RR1, RR1;
        vpxor (1*16)(%r12), RL1, RL1;
        vpxor (2*16)(%r12), RR2, RR2;
        vpxor (3*16)(%r12), RL2, RL2;
        vpxor (4*16)(%r12), RR3, RR3;
        vpxor (5*16)(%r12), RL3, RL3;
        vpxor (6*16)(%r12), RR4, RR4;
        vpxor (7*16)(%r12), RL4, RL4;
        vmovdqu RR1, (0*16)(%r11);
        vmovdqu RL1, (1*16)(%r11);
        vmovdqu RR2, (2*16)(%r11);
        vmovdqu RL2, (3*16)(%r11);
        vmovdqu RR3, (4*16)(%r11);
        vmovdqu RL3, (5*16)(%r11);
        vmovdqu RR4, (6*16)(%r11);
        vmovdqu RL4, (7*16)(%r11);

        popq %r12;

        FRAME_END
        ret;
ENDPROC(cast5_ctr_16way)
