/*
 * Cast6 Cipher 8-way parallel algorithm (AVX/x86_64)
 *
 * Copyright (C) 2012 Johannes Goetzfried
 *     <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
 *
 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 *
 */

#include <linux/linkage.h>
#include <asm/frame.h>
#include "glue_helper-asm-avx.S"

.file "cast6-avx-x86_64-asm_64.S"

.extern cast_s1
.extern cast_s2
.extern cast_s3
.extern cast_s4

/* structure of crypto context */
#define km      0
#define kr      (12*4*4)
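
/*
 * These offsets match struct cast6_ctx (see include/crypto/cast6.h):
 * 48 32-bit masking keys (Km) followed by 48 8-bit rotation keys (Kr),
 * so kr starts at byte offset 12*4*4 = 192.  As a rough C sketch:
 *
 *	struct cast6_ctx {
 *		u32 Km[12][4];	// masking keys, one per quad-round op
 *		u8 Kr[12][4];	// 5-bit (mod 32) rotation keys
 *	};
 */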

/* s-boxes */
#define s1      cast_s1
#define s2      cast_s2
#define s3      cast_s3
#define s4      cast_s4

/**********************************************************************
  8-way AVX cast6
 **********************************************************************/
#define CTX %r15

#define RA1 %xmm0
#define RB1 %xmm1
#define RC1 %xmm2
#define RD1 %xmm3

#define RA2 %xmm4
#define RB2 %xmm5
#define RC2 %xmm6
#define RD2 %xmm7

#define RX  %xmm8

#define RKM  %xmm9
#define RKR  %xmm10
#define RKRF %xmm11
#define RKRR %xmm12
#define R32  %xmm13
#define R1ST %xmm14

#define RTMP %xmm15

#define RID1  %rdi
#define RID1d %edi
#define RID2  %rsi
#define RID2d %esi

#define RGI1   %rdx
#define RGI1bl %dl
#define RGI1bh %dh
#define RGI2   %rcx
#define RGI2bl %cl
#define RGI2bh %ch

#define RGI3   %rax
#define RGI3bl %al
#define RGI3bh %ah
#define RGI4   %rbx
#define RGI4bl %bl
#define RGI4bh %bh

#define RFS1  %r8
#define RFS1d %r8d
#define RFS2  %r9
#define RFS2d %r9d
#define RFS3  %r10
#define RFS3d %r10d


#define lookup_32bit(src, dst, op1, op2, op3, interleave_op, il_reg) \
        movzbl          src ## bh,     RID1d;    \
        movzbl          src ## bl,     RID2d;    \
        shrq $16,       src;                     \
        movl            s1(, RID1, 4), dst ## d; \
        op1             s2(, RID2, 4), dst ## d; \
        movzbl          src ## bh,     RID1d;    \
        movzbl          src ## bl,     RID2d;    \
        interleave_op(il_reg);                   \
        op2             s3(, RID1, 4), dst ## d; \
        op3             s4(, RID2, 4), dst ## d;
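
/*
 * lookup_32bit() does the s-box half of one CAST round in a GPR: the
 * four bytes of 'src' index cast_s1..cast_s4 and the results are
 * combined with op1/op2/op3 (xorl/subl/addl, chosen per f1/f2/f3
 * below).  Bytes are consumed via bh/bl, i.e. bits 15..8 and 7..0
 * first, then the upper half after the shrq; the extra rotate-by-16
 * set up in preload_rkr() compensates for this order.
 */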

#define dummy(d) /* do nothing */

#define shr_next(reg) \
        shrq $16,       reg;

#define F_head(a, x, gi1, gi2, op0) \
        op0     a,      RKM,  x;                 \
        vpslld  RKRF,   x,    RTMP;              \
        vpsrld  RKRR,   x,    x;                 \
        vpor    RTMP,   x,    x;                 \
        \
        vmovq           x,    gi1;               \
        vpextrq $1,     x,    gi2;
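
/*
 * F_head(): vector half of the round function: x = (Km op0 data),
 * rotated left by the current Kr as (x << Kr) | (x >> (32 - Kr)) with
 * vpslld/vpsrld/vpor, then the four 32-bit results are split out to
 * the two GPRs gi1/gi2 (two words each) for the table lookups.
 */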

#define F_tail(a, x, gi1, gi2, op1, op2, op3) \
        lookup_32bit(##gi1, RFS1, op1, op2, op3, shr_next, ##gi1); \
        lookup_32bit(##gi2, RFS3, op1, op2, op3, shr_next, ##gi2); \
        \
        lookup_32bit(##gi1, RFS2, op1, op2, op3, dummy, none);     \
        shlq $32,       RFS2;                                      \
        orq             RFS1, RFS2;                                \
        lookup_32bit(##gi2, RFS1, op1, op2, op3, dummy, none);     \
        shlq $32,       RFS1;                                      \
        orq             RFS1, RFS3;                                \
        \
        vmovq           RFS2, x;                                   \
        vpinsrq $1,     RFS3, x, x;
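
/*
 * F_tail(): scalar half: run lookup_32bit() on both words held in gi1
 * and both words in gi2 (the shrq exposing the second word is
 * interleaved into the first lookup), then repack the four 32-bit
 * results into the xmm register x with vmovq/vpinsrq.
 */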

#define F_2(a1, b1, a2, b2, op0, op1, op2, op3) \
        F_head(b1, RX, RGI1, RGI2, op0);              \
        F_head(b2, RX, RGI3, RGI4, op0);              \
        \
        F_tail(b1, RX, RGI1, RGI2, op1, op2, op3);    \
        F_tail(b2, RTMP, RGI3, RGI4, op1, op2, op3);  \
        \
        vpxor           a1, RX,   a1;                 \
        vpxor           a2, RTMP, a2;

#define F1_2(a1, b1, a2, b2) \
        F_2(a1, b1, a2, b2, vpaddd, xorl, subl, addl)
#define F2_2(a1, b1, a2, b2) \
        F_2(a1, b1, a2, b2, vpxor, subl, addl, xorl)
#define F3_2(a1, b1, a2, b2) \
        F_2(a1, b1, a2, b2, vpsubd, addl, xorl, subl)
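
/*
 * With these operator choices, the three variants are the CAST-256
 * round functions of RFC 2612, section 2.1, applied to two xmm
 * registers (eight blocks) at once:
 *
 *	f1: I = ((Km + D) <<< Kr); f = ((S1[Ia] ^ S2[Ib]) - S3[Ic]) + S4[Id]
 *	f2: I = ((Km ^ D) <<< Kr); f = ((S1[Ia] - S2[Ib]) + S3[Ic]) ^ S4[Id]
 *	f3: I = ((Km - D) <<< Kr); f = ((S1[Ia] + S2[Ib]) ^ S3[Ic]) - S4[Id]
 *
 * where Ia..Id are the bytes of I, most significant first.
 */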

#define qop(in, out, f) \
        F ## f ## _2(out ## 1, in ## 1, out ## 2, in ## 2);

#define get_round_keys(nn) \
        vbroadcastss    (km+(4*(nn)))(CTX), RKM;        \
        vpand           R1ST,               RKR,  RKRF; \
        vpsubq          RKRF,               R32,  RKRR; \
        vpsrldq $1,     RKR,                RKR;
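
/*
 * get_round_keys(): broadcast the nn'th 32-bit masking key into RKM
 * and derive both shift counts for the current rotation key: RKRF is
 * the low five bits of the head Kr byte (left-shift count), RKRR is
 * 32 - RKRF (right-shift count of the emulated rotate).  vpsrldq then
 * pops that byte off the RKR queue.
 */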

#define Q(n) \
        get_round_keys(4*n+0); \
        qop(RD, RC, 1);        \
        \
        get_round_keys(4*n+1); \
        qop(RC, RB, 2);        \
        \
        get_round_keys(4*n+2); \
        qop(RB, RA, 3);        \
        \
        get_round_keys(4*n+3); \
        qop(RA, RD, 1);

#define QBAR(n) \
        get_round_keys(4*n+3); \
        qop(RA, RD, 1);        \
        \
        get_round_keys(4*n+2); \
        qop(RB, RA, 3);        \
        \
        get_round_keys(4*n+1); \
        qop(RC, RB, 2);        \
        \
        get_round_keys(4*n+0); \
        qop(RD, RC, 1);
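
/*
 * Q(n) is the forward quad-round of RFC 2612, section 2.2, applied to
 * all eight blocks in parallel:
 *
 *	C ^= f1(D);  B ^= f2(C);  A ^= f3(B);  D ^= f1(A);
 *
 * each op using the next Km word and Kr byte.  QBAR(n) is the inverse
 * quad-round: the same four ops in reverse order.
 */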

#define shuffle(mask) \
        vpshufb         mask,            RKR, RKR;

#define preload_rkr(n, do_mask, mask) \
        vbroadcastss    .L16_mask,                RKR;      \
        /* add 16-bit rotation to key rotations (mod 32) */ \
        vpxor           (kr+n*16)(CTX),           RKR, RKR; \
        do_mask(mask);
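
/*
 * preload_rkr() loads the 16 rotation-key bytes for the next four
 * quad-rounds.  Since Kr values are five bits, XOR with 16 equals
 * adding 16 mod 32; the extra rotate-by-16 is folded into every Kr so
 * that lookup_32bit(), which reads the low 16-bit half of I first,
 * sees Ia/Ib in the bytes it consumes first.  do_mask() optionally
 * reorders the Kr bytes to match the Q/QBAR consumption order (see
 * the .Lrkr_* masks below).
 */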

#define transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
        vpunpckldq              x1, x0, t0; \
        vpunpckhdq              x1, x0, t2; \
        vpunpckldq              x3, x2, t1; \
        vpunpckhdq              x3, x2, x3; \
        \
        vpunpcklqdq             t1, t0, x0; \
        vpunpckhqdq             t1, t0, x1; \
        vpunpcklqdq             x3, t2, x2; \
        vpunpckhqdq             x3, t2, x3;
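
/*
 * transpose_4x4(): 4x4 32-bit matrix transpose turning four 128-bit
 * blocks, one per register, into sliced form: x0 = word A of all four
 * blocks, x1 = word B, x2 = word C, x3 = word D.
 */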

#define inpack_blocks(x0, x1, x2, x3, t0, t1, t2, rmask) \
        vpshufb rmask, x0,      x0; \
        vpshufb rmask, x1,      x1; \
        vpshufb rmask, x2,      x2; \
        vpshufb rmask, x3,      x3; \
        \
        transpose_4x4(x0, x1, x2, x3, t0, t1, t2)

#define outunpack_blocks(x0, x1, x2, x3, t0, t1, t2, rmask) \
        transpose_4x4(x0, x1, x2, x3, t0, t1, t2) \
        \
        vpshufb rmask,          x0, x0;       \
        vpshufb rmask,          x1, x1;       \
        vpshufb rmask,          x2, x2;       \
        vpshufb rmask,          x3, x3;

.section        .rodata.cst16, "aM", @progbits, 16
.align 16
.Lxts_gf128mul_and_shl1_mask:
        .byte 0x87, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
.Lbswap_mask:
        .byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lbswap128_mask:
        .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
.Lrkr_enc_Q_Q_QBAR_QBAR:
        .byte 0, 1, 2, 3, 4, 5, 6, 7, 11, 10, 9, 8, 15, 14, 13, 12
.Lrkr_enc_QBAR_QBAR_QBAR_QBAR:
        .byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lrkr_dec_Q_Q_Q_Q:
        .byte 12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3
.Lrkr_dec_Q_Q_QBAR_QBAR:
        .byte 12, 13, 14, 15, 8, 9, 10, 11, 7, 6, 5, 4, 3, 2, 1, 0
.Lrkr_dec_QBAR_QBAR_QBAR_QBAR:
        .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
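
/*
 * The .Lrkr_* masks above rearrange the 16 Kr bytes of a four-round
 * group so that the sequential byte pops in get_round_keys() come out
 * in usage order: each group of four is reversed for QBAR rounds
 * (which consume keys 3,2,1,0), and the group order is reversed for
 * decryption, which walks the schedule backwards.
 */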

.section        .rodata.cst4.L16_mask, "aM", @progbits, 4
.align 4
.L16_mask:
        .byte 16, 16, 16, 16

.section        .rodata.cst4.L32_mask, "aM", @progbits, 4
.align 4
.L32_mask:
        .byte 32, 0, 0, 0

.section        .rodata.cst4.first_mask, "aM", @progbits, 4
.align 4
.Lfirst_mask:
        .byte 0x1f, 0, 0, 0

.text

.align 8
__cast6_enc_blk8:
        /* input:
         *      %rdi: ctx
         *      RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks
         * output:
         *      RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
         */
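
        /*
         * CAST-256 encryption: 12 quad-rounds, the first six using the
         * forward quad-round Q and the last six the inverse quad-round
         * QBAR (RFC 2612, section 2.2).
         */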

        pushq %r15;
        pushq %rbx;

        movq %rdi, CTX;

        vmovdqa .Lbswap_mask, RKM;
        vmovd .Lfirst_mask, R1ST;
        vmovd .L32_mask, R32;

        inpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
        inpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

        preload_rkr(0, dummy, none);
        Q(0);
        Q(1);
        Q(2);
        Q(3);
        preload_rkr(1, shuffle, .Lrkr_enc_Q_Q_QBAR_QBAR);
        Q(4);
        Q(5);
        QBAR(6);
        QBAR(7);
        preload_rkr(2, shuffle, .Lrkr_enc_QBAR_QBAR_QBAR_QBAR);
        QBAR(8);
        QBAR(9);
        QBAR(10);
        QBAR(11);

        popq %rbx;
        popq %r15;

        vmovdqa .Lbswap_mask, RKM;

        outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
        outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

        ret;
ENDPROC(__cast6_enc_blk8)

.align 8
__cast6_dec_blk8:
        /* input:
         *      %rdi: ctx
         *      RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
         * output:
         *      RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: decrypted blocks
         */

        pushq %r15;
        pushq %rbx;

        movq %rdi, CTX;

        vmovdqa .Lbswap_mask, RKM;
        vmovd .Lfirst_mask, R1ST;
        vmovd .L32_mask, R32;

        inpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
        inpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

        preload_rkr(2, shuffle, .Lrkr_dec_Q_Q_Q_Q);
        Q(11);
        Q(10);
        Q(9);
        Q(8);
        preload_rkr(1, shuffle, .Lrkr_dec_Q_Q_QBAR_QBAR);
        Q(7);
        Q(6);
        QBAR(5);
        QBAR(4);
        preload_rkr(0, shuffle, .Lrkr_dec_QBAR_QBAR_QBAR_QBAR);
        QBAR(3);
        QBAR(2);
        QBAR(1);
        QBAR(0);

        popq %rbx;
        popq %r15;

        vmovdqa .Lbswap_mask, RKM;
        outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
        outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);

        ret;
ENDPROC(__cast6_dec_blk8)

ENTRY(cast6_ecb_enc_8way)
        /* input:
         *      %rdi: ctx
         *      %rsi: dst
         *      %rdx: src
         */
        FRAME_BEGIN
        pushq %r15;

        movq %rdi, CTX;
        movq %rsi, %r11;

        load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

        call __cast6_enc_blk8;

        store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

        popq %r15;
        FRAME_END
        ret;
ENDPROC(cast6_ecb_enc_8way)

ENTRY(cast6_ecb_dec_8way)
        /* input:
         *      %rdi: ctx
         *      %rsi: dst
         *      %rdx: src
         */
        FRAME_BEGIN
        pushq %r15;

        movq %rdi, CTX;
        movq %rsi, %r11;

        load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

        call __cast6_dec_blk8;

        store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

        popq %r15;
        FRAME_END
        ret;
ENDPROC(cast6_ecb_dec_8way)

ENTRY(cast6_cbc_dec_8way)
        /* input:
         *      %rdi: ctx
         *      %rsi: dst
         *      %rdx: src
         */
        FRAME_BEGIN
        pushq %r12;
        pushq %r15;

        movq %rdi, CTX;
        movq %rsi, %r11;
        movq %rdx, %r12;

        load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

        call __cast6_dec_blk8;

        store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

        popq %r15;
        popq %r12;
        FRAME_END
        ret;
ENDPROC(cast6_cbc_dec_8way)

ENTRY(cast6_ctr_8way)
        /* input:
         *      %rdi: ctx, CTX
         *      %rsi: dst
         *      %rdx: src
         *      %rcx: iv (little endian, 128bit)
         */
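        /*
         * load_ctr_8way()/store_ctr_8way() are helpers from the
         * included glue_helper-asm-avx.S: broadly, they expand the IV
         * into eight successive counter blocks (byte-swapped via
         * .Lbswap128_mask) and xor the encrypted counters with src on
         * the way out.
         */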
        FRAME_BEGIN
        pushq %r12;
        pushq %r15;

        movq %rdi, CTX;
        movq %rsi, %r11;
        movq %rdx, %r12;

        load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
                      RD2, RX, RKR, RKM);

        call __cast6_enc_blk8;

        store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

        popq %r15;
        popq %r12;
        FRAME_END
        ret;
ENDPROC(cast6_ctr_8way)

ENTRY(cast6_xts_enc_8way)
        /* input:
         *      %rdi: ctx, CTX
         *      %rsi: dst
         *      %rdx: src
         *      %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
         */
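        /*
         * The XTS helpers from glue_helper-asm-avx.S derive the
         * per-block tweaks by repeated GF(2^128) multiplication by
         * alpha; .Lxts_gf128mul_and_shl1_mask encodes that
         * shift-and-reduce step (0x87 is the reduction polynomial
         * x^128 + x^7 + x^2 + x + 1 folded into the low byte).
         */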
        FRAME_BEGIN
        pushq %r15;

        movq %rdi, CTX;
        movq %rsi, %r11;

        /* regs <= src, dst <= IVs, regs <= regs xor IVs */
        load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2,
                      RX, RKR, RKM, .Lxts_gf128mul_and_shl1_mask);

        call __cast6_enc_blk8;

        /* dst <= regs xor IVs(in dst) */
        store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

        popq %r15;
        FRAME_END
        ret;
ENDPROC(cast6_xts_enc_8way)

ENTRY(cast6_xts_dec_8way)
        /* input:
         *      %rdi: ctx, CTX
         *      %rsi: dst
         *      %rdx: src
         *      %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
         */
        FRAME_BEGIN
        pushq %r15;

        movq %rdi, CTX;
        movq %rsi, %r11;

        /* regs <= src, dst <= IVs, regs <= regs xor IVs */
        load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2,
                      RX, RKR, RKM, .Lxts_gf128mul_and_shl1_mask);

        call __cast6_dec_blk8;

        /* dst <= regs xor IVs(in dst) */
        store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);

        popq %r15;
        FRAME_END
        ret;
ENDPROC(cast6_xts_dec_8way)