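########################################################################
# SHA-512 transform for x86_64 using AVX2 and the BMI2 rorx instruction
########################################################################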
#include <linux/linkage.h>

.text

# Virtual registers
Y_0 = %ymm4
Y_1 = %ymm5
Y_2 = %ymm6
Y_3 = %ymm7

YTMP0 = %ymm0
YTMP1 = %ymm1
YTMP2 = %ymm2
YTMP3 = %ymm3
YTMP4 = %ymm8
XFER  = YTMP0

BYTE_FLIP_MASK = %ymm9

# 1st arg is %rdi, which is saved to the stack and read back via %r12
CTX1 = %rdi
CTX2 = %r12
# 2nd arg
INP = %rsi
# 3rd arg
NUM_BLKS = %rdx

c  = %rcx
d  = %r8
e  = %rdx		# aliases NUM_BLKS
y3 = %rsi		# aliases INP

TBL = %rdi		# aliases CTX1

a = %rax
b = %rbx

f = %r9
g = %r10
h = %r11
old_h = %r11

T1 = %r12		# aliases CTX2
y0 = %r13
y1 = %r14
y2 = %r15

# Local stack frame: cached K[t]+W[t] values, round-group counter,
# input pointer, end-of-input pointer, and digest pointer (needed
# because %rdi/%rsi/%rdx double as round registers above)
XFER_SIZE = 4*8
SRND_SIZE = 1*8
INP_SIZE = 1*8
INPEND_SIZE = 1*8
CTX_SIZE = 1*8

frame_XFER = 0
frame_SRND = frame_XFER + XFER_SIZE
frame_INP = frame_SRND + SRND_SIZE
frame_INPEND = frame_INP + INP_SIZE
frame_CTX = frame_INPEND + INPEND_SIZE
frame_size = frame_CTX + CTX_SIZE

# assume input buffers are not aligned
#define VMOVDQ vmovdqu
# addm [mem], reg
# Add reg to mem using reg-mem add and store
.macro addm p1 p2
	add	\p1, \p2
	mov	\p2, \p1
.endm

# COPY_YMM_AND_BSWAP ymm, [mem], byte_flip_mask
# Load ymm with mem and byte swap each qword
.macro COPY_YMM_AND_BSWAP p1 p2 p3
	VMOVDQ	\p2, \p1
	vpshufb	\p3, \p1, \p1
.endm

# rotate_Ys
# Rotate values of symbols Y0...Y3
.macro rotate_Ys
	Y_ = Y_0
	Y_0 = Y_1
	Y_1 = Y_2
	Y_2 = Y_3
	Y_3 = Y_
.endm

# RotateState
# Rotate symbols a..h right; the old h becomes the new a
.macro RotateState
	old_h = h
	TMP_  = h
	h     = g
	g     = f
	f     = e
	e     = d
	d     = c
	c     = b
	b     = a
	a     = TMP_
.endm

# macro MY_VPALIGNR	YDST, YSRC1, YSRC2, RVAL
# YDST = {YSRC1, YSRC2} >> RVAL*8
.macro MY_VPALIGNR YDST YSRC1 YSRC2 RVAL
	vperm2f128	$0x3, \YSRC2, \YSRC1, \YDST	# YDST = {YS1_LO, YS2_HI}
	vpalignr	$\RVAL, \YSRC2, \YDST, \YDST	# YDST = {YDST, YS2} >> RVAL*8
.endm
.macro FOUR_ROUNDS_AND_SCHED
################################ RND N + 0 #############################

	# Extract w[t-7]
	MY_VPALIGNR	YTMP0, Y_3, Y_2, 8		# YTMP0 = W[-7]
	# Calculate w[t-16] + w[t-7]
	vpaddq		Y_0, YTMP0, YTMP0		# YTMP0 = W[-7] + W[-16]
	# Extract w[t-15]
	MY_VPALIGNR	YTMP1, Y_1, Y_0, 8		# YTMP1 = W[-15]

	# Calculate sigma0

	# Calculate w[t-15] ror 1
	vpsrlq		$1, YTMP1, YTMP2
	vpsllq		$(64-1), YTMP1, YTMP3
	vpor		YTMP2, YTMP3, YTMP3		# YTMP3 = W[-15] ror 1
	# Calculate w[t-15] shr 7
	vpsrlq		$7, YTMP1, YTMP4		# YTMP4 = W[-15] >> 7

	mov	a, y3		# y3 = a			# MAJA
	rorx	$41, e, y0	# y0 = e ror 41			# S1A
	rorx	$18, e, y1	# y1 = e ror 18			# S1B
	add	frame_XFER(%rsp), h	# h = k + w + h		# --
	or	c, y3		# y3 = a|c			# MAJA
	mov	f, y2		# y2 = f			# CH
	rorx	$34, a, T1	# T1 = a ror 34			# S0B

	xor	y1, y0		# y0 = (e ror 41) ^ (e ror 18)	# S1
	xor	g, y2		# y2 = f^g			# CH
	rorx	$14, e, y1	# y1 = e ror 14			# S1

	and	e, y2		# y2 = (f^g)&e			# CH
	xor	y1, y0		# y0 = S1 = (e ror 41) ^ (e ror 18) ^ (e ror 14)
	rorx	$39, a, y1	# y1 = a ror 39			# S0A
	add	h, d		# d = k + w + h + d		# --

	and	b, y3		# y3 = (a|c)&b			# MAJA
	xor	T1, y1		# y1 = (a ror 39) ^ (a ror 34)	# S0
	rorx	$28, a, T1	# T1 = a ror 28			# S0

	xor	g, y2		# y2 = CH = ((f^g)&e)^g		# CH
	xor	T1, y1		# y1 = S0 = (a ror 39) ^ (a ror 34) ^ (a ror 28)
	mov	a, T1		# T1 = a			# MAJB
	and	c, T1		# T1 = a&c			# MAJB

	add	y0, y2		# y2 = S1 + CH			# --
	or	T1, y3		# y3 = MAJ = ((a|c)&b)|(a&c)	# MAJ
	add	y1, h		# h = k + w + h + S0		# --

	add	y2, d		# d = d + T1			# --

	add	y2, h		# h = k + w + h + S0 + S1 + CH = T1 + S0
	add	y3, h		# h = T1 + S0 + MAJ = T1 + T2	# --

	RotateState

################################ RND N + 1 #############################

	# Calculate w[t-15] ror 8
	vpsrlq		$8, YTMP1, YTMP2
	vpsllq		$(64-8), YTMP1, YTMP1
	vpor		YTMP2, YTMP1, YTMP1		# YTMP1 = W[-15] ror 8
	# XOR the three components
	vpxor		YTMP4, YTMP3, YTMP3		# YTMP3 = (W[-15] ror 1) ^ (W[-15] >> 7)
	vpxor		YTMP1, YTMP3, YTMP1		# YTMP1 = s0

	# Add three components, w[t-16], w[t-7] and sigma0
	vpaddq		YTMP1, YTMP0, YTMP0		# YTMP0 = W[-16] + W[-7] + s0
	# Move to appropriate lanes for calculating w[16] and w[17]
	vperm2f128	$0x0, YTMP0, YTMP0, Y_0		# Y_0 = W[-16] + W[-7] + s0 {BABA}
	# Move to appropriate lanes for calculating w[18] and w[19]
	vpand		MASK_YMM_LO(%rip), YTMP0, YTMP0	# YTMP0 = W[-16] + W[-7] + s0 {DC00}

	# Calculate w[16] and w[17] in both 128 bit lanes
	# Calculate sigma1 for w[16] and w[17] on both 128 bit lanes
	vperm2f128	$0x11, Y_3, Y_3, YTMP2		# YTMP2 = W[-2] {BABA}
	vpsrlq		$6, YTMP2, YTMP4		# YTMP4 = W[-2] >> 6 {BABA}

	# Scalar round: same computation as RND N + 0, reordered to
	# interleave with the vector instructions above
	mov	a, y3
	rorx	$41, e, y0
	rorx	$18, e, y1
	add	1*8+frame_XFER(%rsp), h	# h = k + w + h		# --
	or	c, y3

	mov	f, y2
	rorx	$34, a, T1
	xor	y1, y0
	xor	g, y2

	rorx	$14, e, y1
	xor	y1, y0
	rorx	$39, a, y1
	and	e, y2
	add	h, d

	and	b, y3
	xor	T1, y1

	rorx	$28, a, T1
	xor	g, y2

	xor	T1, y1
	mov	a, T1
	and	c, T1
	add	y0, y2

	or	T1, y3
	add	y1, h

	add	y2, d
	add	y2, h
	add	y3, h

	RotateState

################################ RND N + 2 #############################

	# Calculate s1 = (W[-2] ror 19) ^ (W[-2] ror 61) ^ (W[-2] >> 6)
	# for w[16] and w[17]
	vpsrlq		$19, YTMP2, YTMP3		# YTMP3 = W[-2] >> 19 {BABA}
	vpsllq		$(64-19), YTMP2, YTMP1		# YTMP1 = W[-2] << (64-19) {BABA}
	vpor		YTMP1, YTMP3, YTMP3		# YTMP3 = W[-2] ror 19 {BABA}
	vpxor		YTMP3, YTMP4, YTMP4		# YTMP4 = (W[-2] ror 19) ^ (W[-2] >> 6) {BABA}
	vpsrlq		$61, YTMP2, YTMP3		# YTMP3 = W[-2] >> 61 {BABA}
	vpsllq		$(64-61), YTMP2, YTMP1		# YTMP1 = W[-2] << (64-61) {BABA}
	vpor		YTMP1, YTMP3, YTMP3		# YTMP3 = W[-2] ror 61 {BABA}
	vpxor		YTMP3, YTMP4, YTMP4		# YTMP4 = s1 {BABA}

	# Add sigma1 to the other components to get w[16] and w[17]
	vpaddq		YTMP4, Y_0, Y_0			# Y_0 = {W[1], W[0], W[1], W[0]}

	# Start calculating sigma1 for w[18] and w[19] in the upper lane
	vpsrlq		$6, Y_0, YTMP4			# YTMP4 = W[-2] >> 6 {DC--}

	# Scalar round: same computation as RND N + 0
	mov	a, y3
	rorx	$41, e, y0
	add	2*8+frame_XFER(%rsp), h	# h = k + w + h		# --

	rorx	$18, e, y1
	or	c, y3
	mov	f, y2
	xor	g, y2

	rorx	$34, a, T1
	xor	y1, y0
	and	e, y2

	rorx	$14, e, y1
	add	h, d
	and	b, y3

	xor	y1, y0
	rorx	$39, a, y1
	xor	g, y2

	xor	T1, y1
	rorx	$28, a, T1

	xor	T1, y1
	mov	a, T1
	and	c, T1
	add	y0, y2

	or	T1, y3
	add	y1, h
	add	y2, d
	add	y2, h

	add	y3, h

	RotateState

################################ RND N + 3 #############################

	# Calculate s1 for w[18] and w[19]
	vpsrlq		$19, Y_0, YTMP3			# YTMP3 = W[-2] >> 19 {DC--}
	vpsllq		$(64-19), Y_0, YTMP1		# YTMP1 = W[-2] << (64-19) {DC--}
	vpor		YTMP1, YTMP3, YTMP3		# YTMP3 = W[-2] ror 19 {DC--}
	vpxor		YTMP3, YTMP4, YTMP4		# YTMP4 = (W[-2] ror 19) ^ (W[-2] >> 6) {DC--}
	vpsrlq		$61, Y_0, YTMP3			# YTMP3 = W[-2] >> 61 {DC--}
	vpsllq		$(64-61), Y_0, YTMP1		# YTMP1 = W[-2] << (64-61) {DC--}
	vpor		YTMP1, YTMP3, YTMP3		# YTMP3 = W[-2] ror 61 {DC--}
	vpxor		YTMP3, YTMP4, YTMP4		# YTMP4 = s1 {DC--}

	# Add sigma0 + w[t-7] + w[t-16] for w[18] and w[19] to sigma1
	vpaddq		YTMP4, YTMP0, YTMP2		# YTMP2 = {W[3], W[2], --, --}

	# Form w[19], w[18], w[17], w[16]
	vpblendd	$0xF0, YTMP2, Y_0, Y_0		# Y_0 = {W[3], W[2], W[1], W[0]}

	# Scalar round: same computation as RND N + 0
	mov	a, y3
	rorx	$41, e, y0
	rorx	$18, e, y1
	add	3*8+frame_XFER(%rsp), h	# h = k + w + h		# --
	or	c, y3

	mov	f, y2
	rorx	$34, a, T1
	xor	y1, y0
	xor	g, y2

	rorx	$14, e, y1
	and	e, y2
	add	h, d
	and	b, y3

	xor	y1, y0
	xor	g, y2

	rorx	$39, a, y1
	add	y0, y2

	xor	T1, y1
	add	y2, d

	rorx	$28, a, T1

	xor	T1, y1
	mov	a, T1
	and	c, T1
	or	T1, y3

	add	y1, h
	add	y2, h
	add	y3, h

	RotateState

	rotate_Ys
.endm
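########################################################################
# DO_4ROUNDS: four SHA-512 rounds with no message scheduling, used for
# the last 16 of the 80 rounds.  Each round computes
#	T1 = h + S1(e) + Ch(e,f,g) + K[t] + W[t]
#	T2 = S0(a) + Maj(a,b,c)
# then d += T1 and h = T1 + T2, where
#	S0(a) = (a ror 28) ^ (a ror 34) ^ (a ror 39)
#	S1(e) = (e ror 14) ^ (e ror 18) ^ (e ror 41)
#	Ch(e,f,g)  = (e&f) ^ (~e&g)            computed as ((f^g)&e)^g
#	Maj(a,b,c) = (a&b) ^ (a&c) ^ (b&c)     computed as ((a|c)&b)|(a&c)
# K[t] + W[t] for the four rounds has been precomputed at frame_XFER.
# The final h update of each round is deferred into the next round via
# old_h to shorten the dependency chain.
########################################################################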

.macro DO_4ROUNDS

################################ RND N + 0 #############################

	mov	f, y2		# y2 = f			# CH
	rorx	$41, e, y0	# y0 = e ror 41			# S1A
	rorx	$18, e, y1	# y1 = e ror 18			# S1B
	xor	g, y2		# y2 = f^g			# CH

	xor	y1, y0		# y0 = (e ror 41) ^ (e ror 18)	# S1
	rorx	$14, e, y1	# y1 = e ror 14			# S1
	and	e, y2		# y2 = (f^g)&e			# CH

	xor	y1, y0		# y0 = S1 = (e ror 41) ^ (e ror 18) ^ (e ror 14)
	rorx	$34, a, T1	# T1 = a ror 34			# S0B
	xor	g, y2		# y2 = CH = ((f^g)&e)^g		# CH
	rorx	$39, a, y1	# y1 = a ror 39			# S0A
	mov	a, y3		# y3 = a			# MAJA

	xor	T1, y1		# y1 = (a ror 39) ^ (a ror 34)	# S0
	rorx	$28, a, T1	# T1 = a ror 28			# S0
	add	frame_XFER(%rsp), h	# h = k + w + h		# --
	or	c, y3		# y3 = a|c			# MAJA

	xor	T1, y1		# y1 = S0 = (a ror 39) ^ (a ror 34) ^ (a ror 28)
	mov	a, T1		# T1 = a			# MAJB
	and	b, y3		# y3 = (a|c)&b			# MAJA
	and	c, T1		# T1 = a&c			# MAJB
	add	y0, y2		# y2 = S1 + CH			# --

	add	h, d		# d = k + w + h + d		# --
	or	T1, y3		# y3 = MAJ = ((a|c)&b)|(a&c)	# MAJ
	add	y1, h		# h = k + w + h + S0		# --

	add	y2, d		# d = d + T1			# --

	RotateState

################################ RND N + 1 #############################
	# (same round computation; the previous round's h update is
	# completed here via old_h)

	add	y2, old_h	# h = k + w + h + S0 + S1 + CH = T1 + S0
	mov	f, y2
	rorx	$41, e, y0
	rorx	$18, e, y1
	xor	g, y2

	xor	y1, y0
	rorx	$14, e, y1
	and	e, y2
	add	y3, old_h	# h = T1 + S0 + MAJ		# --

	xor	y1, y0
	rorx	$34, a, T1
	xor	g, y2
	rorx	$39, a, y1
	mov	a, y3

	xor	T1, y1
	rorx	$28, a, T1
	add	8*1+frame_XFER(%rsp), h	# h = k + w + h		# --
	or	c, y3

	xor	T1, y1
	mov	a, T1
	and	b, y3
	and	c, T1
	add	y0, y2

	add	h, d
	or	T1, y3
	add	y1, h

	add	y2, d

	RotateState

################################ RND N + 2 #############################

	add	y2, old_h	# h = k + w + h + S0 + S1 + CH = T1 + S0
	mov	f, y2
	rorx	$41, e, y0
	rorx	$18, e, y1
	xor	g, y2

	xor	y1, y0
	rorx	$14, e, y1
	and	e, y2
	add	y3, old_h	# h = T1 + S0 + MAJ		# --

	xor	y1, y0
	rorx	$34, a, T1
	xor	g, y2
	rorx	$39, a, y1
	mov	a, y3

	xor	T1, y1
	rorx	$28, a, T1
	add	8*2+frame_XFER(%rsp), h	# h = k + w + h		# --
	or	c, y3

	xor	T1, y1
	mov	a, T1
	and	b, y3
	and	c, T1
	add	y0, y2

	add	h, d
	or	T1, y3
	add	y1, h

	add	y2, d

	RotateState

################################ RND N + 3 #############################

	add	y2, old_h	# h = k + w + h + S0 + S1 + CH = T1 + S0
	mov	f, y2
	rorx	$41, e, y0
	rorx	$18, e, y1
	xor	g, y2

	xor	y1, y0
	rorx	$14, e, y1
	and	e, y2
	add	y3, old_h	# h = T1 + S0 + MAJ		# --

	xor	y1, y0
	rorx	$34, a, T1
	xor	g, y2
	rorx	$39, a, y1
	mov	a, y3

	xor	T1, y1
	rorx	$28, a, T1
	add	8*3+frame_XFER(%rsp), h	# h = k + w + h		# --
	or	c, y3

	xor	T1, y1
	mov	a, T1
	and	b, y3
	and	c, T1
	add	y0, y2

	add	h, d
	or	T1, y3
	add	y1, h

	add	y2, d

	add	y2, h		# h = k + w + h + S0 + S1 + CH = T1 + S0

	add	y3, h		# h = T1 + S0 + MAJ		# --

	RotateState

.endm
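########################################################################
# void sha512_transform_rorx(u64 *digest, const void *data, u64 num_blks)
# arg 1 (%rdi) : pointer to the eight 64-bit digest words
# arg 2 (%rsi) : pointer to the input data, num_blks * 128 bytes
# arg 3 (%rdx) : number of 128-byte SHA-512 blocks to process
# The C-level names above are illustrative; the register roles follow
# from the CTX1/INP/NUM_BLKS definitions earlier in this file.
########################################################################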
SYM_FUNC_START(sha512_transform_rorx)
	# Save GPRs
	push	%rbx
	push	%r12
	push	%r13
	push	%r14
	push	%r15

	# Allocate stack space; keep %rsp 32-byte aligned for vmovdqa
	push	%rbp
	mov	%rsp, %rbp
	sub	$frame_size, %rsp
	and	$~(0x20 - 1), %rsp

	shl	$7, NUM_BLKS		# convert to bytes
	jz	done_hash
	add	INP, NUM_BLKS		# pointer to end of data
	mov	NUM_BLKS, frame_INPEND(%rsp)

	# Load initial digest
	mov	8*0(CTX1), a
	mov	8*1(CTX1), b
	mov	8*2(CTX1), c
	mov	8*3(CTX1), d
	mov	8*4(CTX1), e
	mov	8*5(CTX1), f
	mov	8*6(CTX1), g
	mov	8*7(CTX1), h

	# Save %rdi (CTX) before it gets clobbered by TBL
	mov	%rdi, frame_CTX(%rsp)

	vmovdqa	PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK

loop0:
	lea	K512(%rip), TBL

	# Byte swap the first 16 qwords of the message block
	COPY_YMM_AND_BSWAP	Y_0, (INP), BYTE_FLIP_MASK
	COPY_YMM_AND_BSWAP	Y_1, 1*32(INP), BYTE_FLIP_MASK
	COPY_YMM_AND_BSWAP	Y_2, 2*32(INP), BYTE_FLIP_MASK
	COPY_YMM_AND_BSWAP	Y_3, 3*32(INP), BYTE_FLIP_MASK

	mov	INP, frame_INP(%rsp)

	# Rounds 0..63: 4 iterations of 4 unrolled groups of 4 rounds,
	# scheduling 4 new message words per group
	movq	$4, frame_SRND(%rsp)

.align 16
loop1:
	vpaddq	(TBL), Y_0, XFER	# XFER = K[t] + W[t] for 4 rounds
	vmovdqa	XFER, frame_XFER(%rsp)
	FOUR_ROUNDS_AND_SCHED

	vpaddq	1*32(TBL), Y_0, XFER
	vmovdqa	XFER, frame_XFER(%rsp)
	FOUR_ROUNDS_AND_SCHED

	vpaddq	2*32(TBL), Y_0, XFER
	vmovdqa	XFER, frame_XFER(%rsp)
	FOUR_ROUNDS_AND_SCHED

	vpaddq	3*32(TBL), Y_0, XFER
	vmovdqa	XFER, frame_XFER(%rsp)
	add	$(4*32), TBL
	FOUR_ROUNDS_AND_SCHED

	subq	$1, frame_SRND(%rsp)
	jne	loop1

	# Rounds 64..79: no further scheduling needed
	movq	$2, frame_SRND(%rsp)
loop2:
	vpaddq	(TBL), Y_0, XFER
	vmovdqa	XFER, frame_XFER(%rsp)
	DO_4ROUNDS
	vpaddq	1*32(TBL), Y_1, XFER
	vmovdqa	XFER, frame_XFER(%rsp)
	add	$(2*32), TBL
	DO_4ROUNDS

	vmovdqa	Y_2, Y_0
	vmovdqa	Y_3, Y_1

	subq	$1, frame_SRND(%rsp)
	jne	loop2

	# Add this block's working variables back into the digest
	mov	frame_CTX(%rsp), CTX2
	addm	8*0(CTX2), a
	addm	8*1(CTX2), b
	addm	8*2(CTX2), c
	addm	8*3(CTX2), d
	addm	8*4(CTX2), e
	addm	8*5(CTX2), f
	addm	8*6(CTX2), g
	addm	8*7(CTX2), h

	# Advance to the next 128-byte block
	mov	frame_INP(%rsp), INP
	add	$128, INP
	cmp	frame_INPEND(%rsp), INP
	jne	loop0

done_hash:

	# Restore stack pointer
	mov	%rbp, %rsp
	pop	%rbp

	# Restore GPRs
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbx

	ret
SYM_FUNC_END(sha512_transform_rorx)
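
########################################################################
### Binary Data

# Mergeable 640-byte rodata section. This allows the linker to merge
# the table with an identical 640-byte fragment of another rodata
# section (if such a section exists).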
.section .rodata.cst640.K512, "aM", @progbits, 640
.align 64
# The 80 SHA-512 round constants K[t] (FIPS 180-4)
K512:
	.quad	0x428a2f98d728ae22,0x7137449123ef65cd
	.quad	0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
	.quad	0x3956c25bf348b538,0x59f111f1b605d019
	.quad	0x923f82a4af194f9b,0xab1c5ed5da6d8118
	.quad	0xd807aa98a3030242,0x12835b0145706fbe
	.quad	0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
	.quad	0x72be5d74f27b896f,0x80deb1fe3b1696b1
	.quad	0x9bdc06a725c71235,0xc19bf174cf692694
	.quad	0xe49b69c19ef14ad2,0xefbe4786384f25e3
	.quad	0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
	.quad	0x2de92c6f592b0275,0x4a7484aa6ea6e483
	.quad	0x5cb0a9dcbd41fbd4,0x76f988da831153b5
	.quad	0x983e5152ee66dfab,0xa831c66d2db43210
	.quad	0xb00327c898fb213f,0xbf597fc7beef0ee4
	.quad	0xc6e00bf33da88fc2,0xd5a79147930aa725
	.quad	0x06ca6351e003826f,0x142929670a0e6e70
	.quad	0x27b70a8546d22ffc,0x2e1b21385c26c926
	.quad	0x4d2c6dfc5ac42aed,0x53380d139d95b3df
	.quad	0x650a73548baf63de,0x766a0abb3c77b2a8
	.quad	0x81c2c92e47edaee6,0x92722c851482353b
	.quad	0xa2bfe8a14cf10364,0xa81a664bbc423001
	.quad	0xc24b8b70d0f89791,0xc76c51a30654be30
	.quad	0xd192e819d6ef5218,0xd69906245565a910
	.quad	0xf40e35855771202a,0x106aa07032bbd1b8
	.quad	0x19a4c116b8d2d0c8,0x1e376c085141ab53
	.quad	0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
	.quad	0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
	.quad	0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
	.quad	0x748f82ee5defb2fc,0x78a5636f43172f60
	.quad	0x84c87814a1f0ab72,0x8cc702081a6439ec
	.quad	0x90befffa23631e28,0xa4506cebde82bde9
	.quad	0xbef9a3f7b2c67915,0xc67178f2e372532b
	.quad	0xca273eceea26619c,0xd186b8c721c0c207
	.quad	0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
	.quad	0x06f067aa72176fba,0x0a637dc5a2c898a6
	.quad	0x113f9804bef90dae,0x1b710b35131c471b
	.quad	0x28db77f523047d84,0x32caab7b40c72493
	.quad	0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
	.quad	0x4cc5d4becb3e42b6,0x597f299cfc657e2a
	.quad	0x5fcb6fab3ad6faec,0x6c44198c4a475817

.section .rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32
.align 32
# Mask for qword-wise big-endian byte swap via vpshufb
PSHUFFLE_BYTE_FLIP_MASK:
	.octa 0x08090a0b0c0d0e0f0001020304050607
	.octa 0x18191a1b1c1d1e1f1011121314151617

.section .rodata.cst32.MASK_YMM_LO, "aM", @progbits, 32
.align 32
# Mask selecting the upper 128-bit lane of a ymm register
MASK_YMM_LO:
	.octa 0x00000000000000000000000000000000
	.octa 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF