linux/arch/x86/crypto/sm3-avx-asm_64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * SM3 AVX accelerated transform.
 * specified in: https://datatracker.ietf.org/doc/html/draft-sca-cfrg-sm3-02
 *
 * Copyright (C) 2021 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 * Copyright (C) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
 */

/* Based on SM3 AVX/BMI2 accelerated work by libgcrypt at:
 *  https://gnupg.org/software/libgcrypt/index.html
 */

#include <linux/linkage.h>
#include <asm/frame.h>

/* Context structure */

#define state_h0 0
#define state_h1 4
#define state_h2 8
#define state_h3 12
#define state_h4 16
#define state_h5 20
#define state_h6 24
#define state_h7 28
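
/*
 * These offsets index the eight 32-bit chaining words h0..h7 at the start
 * of struct sm3_state. A minimal sketch of the assumed layout (see
 * include/crypto/sm3.h for the authoritative definition):
 *
 *	struct sm3_state {
 *		u32 state[8];
 *		u64 count;
 *		u8 buffer[64];
 *	};
 *
 * so state_hN corresponds to offsetof(struct sm3_state, state[N]).
 */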

/* Constants */

/* Round constant macros */

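/*
 * K##j below is the SM3 round constant T_j rotated left by the round
 * number: K_j = rol32(T_j, j % 32), with T_j = 0x79cc4519 for rounds
 * 0..15 and T_j = 0x7a879d8a for rounds 16..63. A minimal C generator
 * sketch (illustrative only, not part of the build):
 *
 *	for (j = 0; j < 64; j++) {
 *		u32 t = (j < 16) ? 0x79cc4519 : 0x7a879d8a;
 *		u32 r = j & 31;
 *		k[j] = r ? (t << r) | (t >> (32 - r)) : t;
 *	}
 *
 * The values are spelled as signed decimals, presumably so they fit the
 * signed 32-bit displacement encoding of the leal in the round macro.
 */
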
#define K0   2043430169  /* 0x79cc4519 */
#define K1   -208106958  /* 0xf3988a32 */
#define K2   -416213915  /* 0xe7311465 */
#define K3   -832427829  /* 0xce6228cb */
#define K4  -1664855657  /* 0x9cc45197 */
#define K5    965255983  /* 0x3988a32f */
#define K6   1930511966  /* 0x7311465e */
#define K7   -433943364  /* 0xe6228cbc */
#define K8   -867886727  /* 0xcc451979 */
#define K9  -1735773453  /* 0x988a32f3 */
#define K10   823420391  /* 0x311465e7 */
#define K11  1646840782  /* 0x6228cbce */
#define K12 -1001285732  /* 0xc451979c */
#define K13 -2002571463  /* 0x88a32f39 */
#define K14   289824371  /* 0x11465e73 */
#define K15   579648742  /* 0x228cbce6 */
#define K16 -1651869049  /* 0x9d8a7a87 */
#define K17   991229199  /* 0x3b14f50f */
#define K18  1982458398  /* 0x7629ea1e */
#define K19  -330050500  /* 0xec53d43c */
#define K20  -660100999  /* 0xd8a7a879 */
#define K21 -1320201997  /* 0xb14f50f3 */
#define K22  1654563303  /* 0x629ea1e7 */
#define K23  -985840690  /* 0xc53d43ce */
#define K24 -1971681379  /* 0x8a7a879d */
#define K25   351604539  /* 0x14f50f3b */
#define K26   703209078  /* 0x29ea1e76 */
#define K27  1406418156  /* 0x53d43cec */
#define K28 -1482130984  /* 0xa7a879d8 */
#define K29  1330705329  /* 0x4f50f3b1 */
#define K30 -1633556638  /* 0x9ea1e762 */
#define K31  1027854021  /* 0x3d43cec5 */
#define K32  2055708042  /* 0x7a879d8a */
#define K33  -183551212  /* 0xf50f3b14 */
#define K34  -367102423  /* 0xea1e7629 */
#define K35  -734204845  /* 0xd43cec53 */
#define K36 -1468409689  /* 0xa879d8a7 */
#define K37  1358147919  /* 0x50f3b14f */
#define K38 -1578671458  /* 0xa1e7629e */
#define K39  1137624381  /* 0x43cec53d */
#define K40 -2019718534  /* 0x879d8a7a */
#define K41   255530229  /* 0x0f3b14f5 */
#define K42   511060458  /* 0x1e7629ea */
#define K43  1022120916  /* 0x3cec53d4 */
#define K44  2044241832  /* 0x79d8a7a8 */
#define K45  -206483632  /* 0xf3b14f50 */
#define K46  -412967263  /* 0xe7629ea1 */
#define K47  -825934525  /* 0xcec53d43 */
#define K48 -1651869049  /* 0x9d8a7a87 */
#define K49   991229199  /* 0x3b14f50f */
#define K50  1982458398  /* 0x7629ea1e */
#define K51  -330050500  /* 0xec53d43c */
#define K52  -660100999  /* 0xd8a7a879 */
#define K53 -1320201997  /* 0xb14f50f3 */
#define K54  1654563303  /* 0x629ea1e7 */
#define K55  -985840690  /* 0xc53d43ce */
#define K56 -1971681379  /* 0x8a7a879d */
#define K57   351604539  /* 0x14f50f3b */
#define K58   703209078  /* 0x29ea1e76 */
#define K59  1406418156  /* 0x53d43cec */
#define K60 -1482130984  /* 0xa7a879d8 */
#define K61  1330705329  /* 0x4f50f3b1 */
#define K62 -1633556638  /* 0x9ea1e762 */
#define K63  1027854021  /* 0x3d43cec5 */

/* Register macros */

#define RSTATE %rdi
#define RDATA  %rsi
#define RNBLKS %rdx

#define t0 %eax
#define t1 %ebx
#define t2 %ecx

#define a %r8d
#define b %r9d
#define c %r10d
#define d %r11d
#define e %r12d
#define f %r13d
#define g %r14d
#define h %r15d

#define W0 %xmm0
#define W1 %xmm1
#define W2 %xmm2
#define W3 %xmm3
#define W4 %xmm4
#define W5 %xmm5

#define XTMP0 %xmm6
#define XTMP1 %xmm7
#define XTMP2 %xmm8
#define XTMP3 %xmm9
#define XTMP4 %xmm10
#define XTMP5 %xmm11
#define XTMP6 %xmm12

#define BSWAP_REG %xmm15

/* Stack structure */

#define STACK_W_SIZE        (32 * 2 * 3)
#define STACK_REG_SAVE_SIZE (64)

#define STACK_W             (0)
#define STACK_REG_SAVE      (STACK_W + STACK_W_SIZE)
#define STACK_SIZE          (STACK_REG_SAVE + STACK_REG_SAVE_SIZE)
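
/*
 * Layout of the frame set up by sm3_transform_avx (the stack pointer is
 * rounded down to a 64-byte boundary first), sketched for reference:
 *
 *	%rsp + 0   .. %rsp + 191	STACK_W: three 64-byte W slots
 *	%rsp + 192 .. %rsp + 255	STACK_REG_SAVE: callee-saved GPRs
 *					(%rbx, %r15, %r14, %r13, %r12)
 */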

/* Instruction helpers. */

#define roll2(v, reg)           \
        roll $(v), reg;

#define roll3mov(v, src, dst)   \
        movl src, dst;          \
        roll $(v), dst;

#define roll3(v, src, dst)      \
        rorxl $(32-(v)), src, dst;

#define addl2(a, out)           \
        leal (a, out), out;
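
/*
 * roll3() relies on the BMI2 rorxl instruction: a non-destructive rotate
 * right that does not touch the flags, so rotating right by (32 - v)
 * places rol32(src, v) into a separate destination register, e.g.
 *
 *	rorxl $(32-7), %eax, %ebx	computes ebx = rol32(eax, 7)
 *
 * addl2() performs its addition with leal, which likewise leaves the
 * flags untouched.
 */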

/* Round function macros. */

#define GG1(x, y, z, o, t)      \
        movl x, o;              \
        xorl y, o;              \
        xorl z, o;

#define FF1(x, y, z, o, t) GG1(x, y, z, o, t)

#define GG2(x, y, z, o, t)      \
        andnl z, x, o;          \
        movl y, t;              \
        andl x, t;              \
        addl2(t, o);

#define FF2(x, y, z, o, t)      \
        movl y, o;              \
        xorl x, o;              \
        movl y, t;              \
        andl x, t;              \
        andl z, o;              \
        xorl t, o;
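
/*
 * Reference forms of the SM3 boolean functions implemented above
 * (FF1/GG1 serve rounds 0..15, FF2/GG2 serve rounds 16..63):
 *
 *	FF1(x, y, z) = GG1(x, y, z) = x ^ y ^ z
 *	FF2(x, y, z) = (x & y) | (x & z) | (y & z)
 *	GG2(x, y, z) = (x & y) | (~x & z)
 *
 * GG2 is computed as (x & y) + (~x & z) via andnl; the two terms select
 * disjoint bits, so the addition cannot carry and equals the bitwise OR.
 * FF2 uses the equivalent majority form ((x ^ y) & z) ^ (x & y).
 */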

#define R(i, a, b, c, d, e, f, g, h, round, widx, wtype)                \
        /* rol(a, 12) => t0 */                                          \
        roll3mov(12, a, t0); /* rorxl here would reduce perf by 6% on zen3 */ \
        /* rol((t0 + e + K), 7) => t1 */                                \
        leal K##round(t0, e, 1), t1;                                    \
        roll2(7, t1);                                                   \
        /* h + w1 => h */                                               \
        addl wtype##_W1_ADDR(round, widx), h;                           \
        /* h + t1 => h */                                               \
        addl2(t1, h);                                                   \
        /* t1 ^ t0 => t0 */                                             \
        xorl t1, t0;                                                    \
        /* w1w2 + d => d */                                             \
        addl wtype##_W1W2_ADDR(round, widx), d;                         \
        /* FF##i(a,b,c) => t1 */                                        \
        FF##i(a, b, c, t1, t2);                                         \
        /* d + t1 => d */                                               \
        addl2(t1, d);                                                   \
        /* GG##i(e,f,g) => t2 */                                        \
        GG##i(e, f, g, t2, t1);                                         \
        /* h + t2 => h */                                               \
        addl2(t2, h);                                                   \
        /* rol (f, 19) => f */                                          \
        roll2(19, f);                                                   \
        /* d + t0 => d */                                               \
        addl2(t0, d);                                                   \
        /* rol (b, 9) => b */                                           \
        roll2(9, b);                                                    \
        /* P0(h) => h */                                                \
        roll3(9, h, t2);                                                \
        roll3(17, h, t1);                                               \
        xorl t2, h;                                                     \
        xorl t1, h;
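
/*
 * One call of R() performs one SM3 compression round. In the notation of
 * the SM3 specification this is, for round j:
 *
 *	SS1 = rol32(rol32(a, 12) + e + K_j, 7)
 *	SS2 = SS1 ^ rol32(a, 12)
 *	TT1 = FF_j(a, b, c) + d + SS2 + (W_j ^ W_(j+4))
 *	TT2 = GG_j(e, f, g) + h + SS1 + W_j
 *	d   = TT1
 *	h   = P0(TT2) = TT2 ^ rol32(TT2, 9) ^ rol32(TT2, 17)
 *	b   = rol32(b, 9)
 *	f   = rol32(f, 19)
 *
 * The usual rotation of the eight working variables is not performed
 * here; instead the call sites below pass the registers in permuted
 * order from round to round.
 */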

#define R1(a, b, c, d, e, f, g, h, round, widx, wtype) \
        R(1, a, b, c, d, e, f, g, h, round, widx, wtype)

#define R2(a, b, c, d, e, f, g, h, round, widx, wtype) \
        R(2, a, b, c, d, e, f, g, h, round, widx, wtype)

/* Input expansion macros. */

/* Byte-swapped input address. */
#define IW_W_ADDR(round, widx, offs) \
        (STACK_W + ((round) / 4) * 64 + (offs) + ((widx) * 4))(%rsp)

/* Expanded input address. */
#define XW_W_ADDR(round, widx, offs) \
        (STACK_W + ((((round) / 3) - 4) % 2) * 64 + (offs) + ((widx) * 4))(%rsp)

/* Rounds 0-11, byte-swapped input block addresses. */
#define IW_W1_ADDR(round, widx)   IW_W_ADDR(round, widx, 0)
#define IW_W1W2_ADDR(round, widx) IW_W_ADDR(round, widx, 32)

/* Rounds 12-63, expanded input block addresses. */
#define XW_W1_ADDR(round, widx)   XW_W_ADDR(round, widx, 0)
#define XW_W1W2_ADDR(round, widx) XW_W_ADDR(round, widx, 32)
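
/*
 * Both helpers point into the three 64-byte STACK_W slots. Within a slot,
 * bytes 0..15 hold four consecutive message words W[j..j+3] (*_W1_ADDR)
 * and bytes 32..47 hold W[j] ^ W[j+4] (*_W1W2_ADDR); the remaining bytes
 * are padding. Rounds 0-11 read the three slots filled by LOAD_W_XMM_*,
 * while the expanded-schedule rounds ping-pong between the first two
 * slots, e.g. XW_W1_ADDR(15, 2) resolves to (STACK_W + 64 + 8)(%rsp).
 */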

/* Input block loading. */
#define LOAD_W_XMM_1()                                                  \
        vmovdqu 0*16(RDATA), XTMP0; /* XTMP0: w3, w2, w1, w0 */         \
        vmovdqu 1*16(RDATA), XTMP1; /* XTMP1: w7, w6, w5, w4 */         \
        vmovdqu 2*16(RDATA), XTMP2; /* XTMP2: w11, w10, w9, w8 */       \
        vmovdqu 3*16(RDATA), XTMP3; /* XTMP3: w15, w14, w13, w12 */     \
        vpshufb BSWAP_REG, XTMP0, XTMP0;                                \
        vpshufb BSWAP_REG, XTMP1, XTMP1;                                \
        vpshufb BSWAP_REG, XTMP2, XTMP2;                                \
        vpshufb BSWAP_REG, XTMP3, XTMP3;                                \
        vpxor XTMP0, XTMP1, XTMP4;                                      \
        vpxor XTMP1, XTMP2, XTMP5;                                      \
        vpxor XTMP2, XTMP3, XTMP6;                                      \
        leaq 64(RDATA), RDATA;                                          \
        vmovdqa XTMP0, IW_W1_ADDR(0, 0);                                \
        vmovdqa XTMP4, IW_W1W2_ADDR(0, 0);                              \
        vmovdqa XTMP1, IW_W1_ADDR(4, 0);                                \
        vmovdqa XTMP5, IW_W1W2_ADDR(4, 0);

#define LOAD_W_XMM_2()                          \
        vmovdqa XTMP2, IW_W1_ADDR(8, 0);        \
        vmovdqa XTMP6, IW_W1W2_ADDR(8, 0);

#define LOAD_W_XMM_3()                                                  \
        vpshufd $0b00000000, XTMP0, W0; /* W0: xx, w0, xx, xx */        \
        vpshufd $0b11111001, XTMP0, W1; /* W1: xx, w3, w2, w1 */        \
        vmovdqa XTMP1, W2;              /* W2: xx, w6, w5, w4 */        \
        vpalignr $12, XTMP1, XTMP2, W3; /* W3: xx, w9, w8, w7 */        \
        vpalignr $8, XTMP2, XTMP3, W4;  /* W4: xx, w12, w11, w10 */     \
        vpshufd $0b11111001, XTMP3, W5; /* W5: xx, w15, w14, w13 */

/* Message scheduling. Note: 3 words per XMM register. */
#define SCHED_W_0(round, w0, w1, w2, w3, w4, w5)                        \
        /* Load (w[i - 16]) => XTMP0 */                                 \
        vpshufd $0b10111111, w0, XTMP0;                                 \
        vpalignr $12, XTMP0, w1, XTMP0; /* XTMP0: xx, w2, w1, w0 */     \
        /* Load (w[i - 13]) => XTMP1 */                                 \
        vpshufd $0b10111111, w1, XTMP1;                                 \
        vpalignr $12, XTMP1, w2, XTMP1;                                 \
        /* w[i - 9] == w3 */                                            \
        /* XMM3 ^ XTMP0 => XTMP0 */                                     \
        vpxor w3, XTMP0, XTMP0;

#define SCHED_W_1(round, w0, w1, w2, w3, w4, w5)        \
        /* w[i - 3] == w5 */                            \
        /* rol(XMM5, 15) ^ XTMP0 => XTMP0 */            \
        vpslld $15, w5, XTMP2;                          \
        vpsrld $(32-15), w5, XTMP3;                     \
        vpxor XTMP2, XTMP3, XTMP3;                      \
        vpxor XTMP3, XTMP0, XTMP0;                      \
        /* rol(XTMP1, 7) => XTMP1 */                    \
        vpslld $7, XTMP1, XTMP5;                        \
        vpsrld $(32-7), XTMP1, XTMP1;                   \
        vpxor XTMP5, XTMP1, XTMP1;                      \
        /* XMM4 ^ XTMP1 => XTMP1 */                     \
        vpxor w4, XTMP1, XTMP1;                         \
        /* w[i - 6] == XMM4 */                          \
        /* P1(XTMP0) ^ XTMP1 => XMM0 */                 \
        vpslld $15, XTMP0, XTMP5;                       \
        vpsrld $(32-15), XTMP0, XTMP6;                  \
        vpslld $23, XTMP0, XTMP2;                       \
        vpsrld $(32-23), XTMP0, XTMP3;                  \
        vpxor XTMP0, XTMP1, XTMP1;                      \
        vpxor XTMP6, XTMP5, XTMP5;                      \
        vpxor XTMP3, XTMP2, XTMP2;                      \
        vpxor XTMP2, XTMP5, XTMP5;                      \
        vpxor XTMP5, XTMP1, w0;

#define SCHED_W_2(round, w0, w1, w2, w3, w4, w5)        \
        /* W1 in XMM12 */                               \
        vpshufd $0b10111111, w4, XTMP4;                 \
        vpalignr $12, XTMP4, w5, XTMP4;                 \
        vmovdqa XTMP4, XW_W1_ADDR((round), 0);          \
        /* W1 ^ W2 => XTMP1 */                          \
        vpxor w0, XTMP4, XTMP1;                         \
        vmovdqa XTMP1, XW_W1W2_ADDR((round), 0);
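
/*
 * Each SCHED_W_0/1/2 triple computes three new message words and stashes
 * the W1 / W1 ^ W2 values that a later round group will consume from the
 * stack. The recurrence implemented is the standard SM3 expansion:
 *
 *	W[i] = P1(W[i-16] ^ W[i-9] ^ rol32(W[i-3], 15))
 *	       ^ rol32(W[i-13], 7) ^ W[i-6]
 *	P1(x) = x ^ rol32(x, 15) ^ rol32(x, 23)
 *
 * with the rotates built from vpslld/vpsrld pairs, since AVX has no
 * packed-rotate instruction.
 */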
 314
 315
 316.section        .rodata.cst16, "aM", @progbits, 16
 317.align 16
 318
 319.Lbe32mask:
 320        .long 0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f
 321
 322.text
 323
 324/*
 325 * Transform nblocks*64 bytes (nblocks*16 32-bit words) at DATA.
 326 *
 327 * void sm3_transform_avx(struct sm3_state *state,
 328 *                        const u8 *data, int nblocks);
 329 */
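
/*
 * This routine only performs the block transform and expects the caller
 * to own the FPU/SIMD context. A rough usage sketch, modelled on the
 * kernel's sm3_avx glue code (illustrative, not the authoritative API):
 *
 *	kernel_fpu_begin();
 *	sm3_transform_avx(&sctx, data, nblocks);
 *	kernel_fpu_end();
 */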
.align 16
SYM_FUNC_START(sm3_transform_avx)
        /* input:
         *      %rdi: ctx, CTX
         *      %rsi: data (64*nblks bytes)
         *      %rdx: nblocks
         */
        vzeroupper;

        pushq %rbp;
        movq %rsp, %rbp;

        movq %rdx, RNBLKS;

        subq $STACK_SIZE, %rsp;
        andq $(~63), %rsp;

        movq %rbx, (STACK_REG_SAVE + 0 * 8)(%rsp);
        movq %r15, (STACK_REG_SAVE + 1 * 8)(%rsp);
        movq %r14, (STACK_REG_SAVE + 2 * 8)(%rsp);
        movq %r13, (STACK_REG_SAVE + 3 * 8)(%rsp);
        movq %r12, (STACK_REG_SAVE + 4 * 8)(%rsp);

        vmovdqa .Lbe32mask (%rip), BSWAP_REG;

        /* Get the values of the chaining variables. */
        movl state_h0(RSTATE), a;
        movl state_h1(RSTATE), b;
        movl state_h2(RSTATE), c;
        movl state_h3(RSTATE), d;
        movl state_h4(RSTATE), e;
        movl state_h5(RSTATE), f;
        movl state_h6(RSTATE), g;
        movl state_h7(RSTATE), h;

.align 16
.Loop:
        /* Load data part1. */
        LOAD_W_XMM_1();

        leaq -1(RNBLKS), RNBLKS;

        /* Transform 0-3 + Load data part2. */
        R1(a, b, c, d, e, f, g, h, 0, 0, IW); LOAD_W_XMM_2();
        R1(d, a, b, c, h, e, f, g, 1, 1, IW);
        R1(c, d, a, b, g, h, e, f, 2, 2, IW);
        R1(b, c, d, a, f, g, h, e, 3, 3, IW); LOAD_W_XMM_3();

        /* Transform 4-7 + Precalc 12-14. */
        R1(a, b, c, d, e, f, g, h, 4, 0, IW);
        R1(d, a, b, c, h, e, f, g, 5, 1, IW);
        R1(c, d, a, b, g, h, e, f, 6, 2, IW); SCHED_W_0(12, W0, W1, W2, W3, W4, W5);
        R1(b, c, d, a, f, g, h, e, 7, 3, IW); SCHED_W_1(12, W0, W1, W2, W3, W4, W5);

        /* Transform 8-11 + Precalc 12-17. */
        R1(a, b, c, d, e, f, g, h, 8, 0, IW); SCHED_W_2(12, W0, W1, W2, W3, W4, W5);
        R1(d, a, b, c, h, e, f, g, 9, 1, IW); SCHED_W_0(15, W1, W2, W3, W4, W5, W0);
        R1(c, d, a, b, g, h, e, f, 10, 2, IW); SCHED_W_1(15, W1, W2, W3, W4, W5, W0);
        R1(b, c, d, a, f, g, h, e, 11, 3, IW); SCHED_W_2(15, W1, W2, W3, W4, W5, W0);

        /* Transform 12-14 + Precalc 18-20 */
        R1(a, b, c, d, e, f, g, h, 12, 0, XW); SCHED_W_0(18, W2, W3, W4, W5, W0, W1);
        R1(d, a, b, c, h, e, f, g, 13, 1, XW); SCHED_W_1(18, W2, W3, W4, W5, W0, W1);
        R1(c, d, a, b, g, h, e, f, 14, 2, XW); SCHED_W_2(18, W2, W3, W4, W5, W0, W1);

        /* Transform 15-17 + Precalc 21-23 */
        R1(b, c, d, a, f, g, h, e, 15, 0, XW); SCHED_W_0(21, W3, W4, W5, W0, W1, W2);
        R2(a, b, c, d, e, f, g, h, 16, 1, XW); SCHED_W_1(21, W3, W4, W5, W0, W1, W2);
        R2(d, a, b, c, h, e, f, g, 17, 2, XW); SCHED_W_2(21, W3, W4, W5, W0, W1, W2);

        /* Transform 18-20 + Precalc 24-26 */
        R2(c, d, a, b, g, h, e, f, 18, 0, XW); SCHED_W_0(24, W4, W5, W0, W1, W2, W3);
        R2(b, c, d, a, f, g, h, e, 19, 1, XW); SCHED_W_1(24, W4, W5, W0, W1, W2, W3);
        R2(a, b, c, d, e, f, g, h, 20, 2, XW); SCHED_W_2(24, W4, W5, W0, W1, W2, W3);

        /* Transform 21-23 + Precalc 27-29 */
        R2(d, a, b, c, h, e, f, g, 21, 0, XW); SCHED_W_0(27, W5, W0, W1, W2, W3, W4);
        R2(c, d, a, b, g, h, e, f, 22, 1, XW); SCHED_W_1(27, W5, W0, W1, W2, W3, W4);
        R2(b, c, d, a, f, g, h, e, 23, 2, XW); SCHED_W_2(27, W5, W0, W1, W2, W3, W4);

        /* Transform 24-26 + Precalc 30-32 */
        R2(a, b, c, d, e, f, g, h, 24, 0, XW); SCHED_W_0(30, W0, W1, W2, W3, W4, W5);
        R2(d, a, b, c, h, e, f, g, 25, 1, XW); SCHED_W_1(30, W0, W1, W2, W3, W4, W5);
        R2(c, d, a, b, g, h, e, f, 26, 2, XW); SCHED_W_2(30, W0, W1, W2, W3, W4, W5);

        /* Transform 27-29 + Precalc 33-35 */
        R2(b, c, d, a, f, g, h, e, 27, 0, XW); SCHED_W_0(33, W1, W2, W3, W4, W5, W0);
        R2(a, b, c, d, e, f, g, h, 28, 1, XW); SCHED_W_1(33, W1, W2, W3, W4, W5, W0);
        R2(d, a, b, c, h, e, f, g, 29, 2, XW); SCHED_W_2(33, W1, W2, W3, W4, W5, W0);

        /* Transform 30-32 + Precalc 36-38 */
        R2(c, d, a, b, g, h, e, f, 30, 0, XW); SCHED_W_0(36, W2, W3, W4, W5, W0, W1);
        R2(b, c, d, a, f, g, h, e, 31, 1, XW); SCHED_W_1(36, W2, W3, W4, W5, W0, W1);
        R2(a, b, c, d, e, f, g, h, 32, 2, XW); SCHED_W_2(36, W2, W3, W4, W5, W0, W1);

        /* Transform 33-35 + Precalc 39-41 */
        R2(d, a, b, c, h, e, f, g, 33, 0, XW); SCHED_W_0(39, W3, W4, W5, W0, W1, W2);
        R2(c, d, a, b, g, h, e, f, 34, 1, XW); SCHED_W_1(39, W3, W4, W5, W0, W1, W2);
        R2(b, c, d, a, f, g, h, e, 35, 2, XW); SCHED_W_2(39, W3, W4, W5, W0, W1, W2);

        /* Transform 36-38 + Precalc 42-44 */
        R2(a, b, c, d, e, f, g, h, 36, 0, XW); SCHED_W_0(42, W4, W5, W0, W1, W2, W3);
        R2(d, a, b, c, h, e, f, g, 37, 1, XW); SCHED_W_1(42, W4, W5, W0, W1, W2, W3);
        R2(c, d, a, b, g, h, e, f, 38, 2, XW); SCHED_W_2(42, W4, W5, W0, W1, W2, W3);

        /* Transform 39-41 + Precalc 45-47 */
        R2(b, c, d, a, f, g, h, e, 39, 0, XW); SCHED_W_0(45, W5, W0, W1, W2, W3, W4);
        R2(a, b, c, d, e, f, g, h, 40, 1, XW); SCHED_W_1(45, W5, W0, W1, W2, W3, W4);
        R2(d, a, b, c, h, e, f, g, 41, 2, XW); SCHED_W_2(45, W5, W0, W1, W2, W3, W4);

        /* Transform 42-44 + Precalc 48-50 */
        R2(c, d, a, b, g, h, e, f, 42, 0, XW); SCHED_W_0(48, W0, W1, W2, W3, W4, W5);
        R2(b, c, d, a, f, g, h, e, 43, 1, XW); SCHED_W_1(48, W0, W1, W2, W3, W4, W5);
        R2(a, b, c, d, e, f, g, h, 44, 2, XW); SCHED_W_2(48, W0, W1, W2, W3, W4, W5);

        /* Transform 45-47 + Precalc 51-53 */
        R2(d, a, b, c, h, e, f, g, 45, 0, XW); SCHED_W_0(51, W1, W2, W3, W4, W5, W0);
        R2(c, d, a, b, g, h, e, f, 46, 1, XW); SCHED_W_1(51, W1, W2, W3, W4, W5, W0);
        R2(b, c, d, a, f, g, h, e, 47, 2, XW); SCHED_W_2(51, W1, W2, W3, W4, W5, W0);

        /* Transform 48-50 + Precalc 54-56 */
        R2(a, b, c, d, e, f, g, h, 48, 0, XW); SCHED_W_0(54, W2, W3, W4, W5, W0, W1);
        R2(d, a, b, c, h, e, f, g, 49, 1, XW); SCHED_W_1(54, W2, W3, W4, W5, W0, W1);
        R2(c, d, a, b, g, h, e, f, 50, 2, XW); SCHED_W_2(54, W2, W3, W4, W5, W0, W1);

        /* Transform 51-53 + Precalc 57-59 */
        R2(b, c, d, a, f, g, h, e, 51, 0, XW); SCHED_W_0(57, W3, W4, W5, W0, W1, W2);
        R2(a, b, c, d, e, f, g, h, 52, 1, XW); SCHED_W_1(57, W3, W4, W5, W0, W1, W2);
        R2(d, a, b, c, h, e, f, g, 53, 2, XW); SCHED_W_2(57, W3, W4, W5, W0, W1, W2);

        /* Transform 54-56 + Precalc 60-62 */
        R2(c, d, a, b, g, h, e, f, 54, 0, XW); SCHED_W_0(60, W4, W5, W0, W1, W2, W3);
        R2(b, c, d, a, f, g, h, e, 55, 1, XW); SCHED_W_1(60, W4, W5, W0, W1, W2, W3);
        R2(a, b, c, d, e, f, g, h, 56, 2, XW); SCHED_W_2(60, W4, W5, W0, W1, W2, W3);

        /* Transform 57-59 + Precalc 63 */
        R2(d, a, b, c, h, e, f, g, 57, 0, XW); SCHED_W_0(63, W5, W0, W1, W2, W3, W4);
        R2(c, d, a, b, g, h, e, f, 58, 1, XW);
        R2(b, c, d, a, f, g, h, e, 59, 2, XW); SCHED_W_1(63, W5, W0, W1, W2, W3, W4);

        /* Transform 60-62 + Precalc 63 */
        R2(a, b, c, d, e, f, g, h, 60, 0, XW);
        R2(d, a, b, c, h, e, f, g, 61, 1, XW); SCHED_W_2(63, W5, W0, W1, W2, W3, W4);
        R2(c, d, a, b, g, h, e, f, 62, 2, XW);

        /* Transform 63 */
        R2(b, c, d, a, f, g, h, e, 63, 0, XW);

        /* Update the chaining variables. */
        xorl state_h0(RSTATE), a;
        xorl state_h1(RSTATE), b;
        xorl state_h2(RSTATE), c;
        xorl state_h3(RSTATE), d;
        movl a, state_h0(RSTATE);
        movl b, state_h1(RSTATE);
        movl c, state_h2(RSTATE);
        movl d, state_h3(RSTATE);
        xorl state_h4(RSTATE), e;
        xorl state_h5(RSTATE), f;
        xorl state_h6(RSTATE), g;
        xorl state_h7(RSTATE), h;
        movl e, state_h4(RSTATE);
        movl f, state_h5(RSTATE);
        movl g, state_h6(RSTATE);
        movl h, state_h7(RSTATE);

        cmpq $0, RNBLKS;
        jne .Loop;

        vzeroall;

        movq (STACK_REG_SAVE + 0 * 8)(%rsp), %rbx;
        movq (STACK_REG_SAVE + 1 * 8)(%rsp), %r15;
        movq (STACK_REG_SAVE + 2 * 8)(%rsp), %r14;
        movq (STACK_REG_SAVE + 3 * 8)(%rsp), %r13;
        movq (STACK_REG_SAVE + 4 * 8)(%rsp), %r12;

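        /* Overwrite the expanded message words left in the W area
         * (%xmm0 is zero after the vzeroall above). */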
        vmovdqa %xmm0, IW_W1_ADDR(0, 0);
        vmovdqa %xmm0, IW_W1W2_ADDR(0, 0);
        vmovdqa %xmm0, IW_W1_ADDR(4, 0);
        vmovdqa %xmm0, IW_W1W2_ADDR(4, 0);
        vmovdqa %xmm0, IW_W1_ADDR(8, 0);
        vmovdqa %xmm0, IW_W1W2_ADDR(8, 0);

        movq %rbp, %rsp;
        popq %rbp;
        RET;
SYM_FUNC_END(sm3_transform_avx)