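########################################################################
# SHA-512 transform routine for x86_64 using 256-bit wide AVX2 vectors
# and the BMI2 rorx instruction. One 128-byte message block is processed
# per iteration of the main loop; message scheduling is done four qwords
# at a time in ymm registers, interleaved with the scalar round function.
########################################################################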
#ifdef CONFIG_AS_AVX2
#include <linux/linkage.h>

.text

# Virtual registers
Y_0 = %ymm4
Y_1 = %ymm5
Y_2 = %ymm6
Y_3 = %ymm7

YTMP0 = %ymm0
YTMP1 = %ymm1
YTMP2 = %ymm2
YTMP3 = %ymm3
YTMP4 = %ymm8
XFER  = YTMP0

BYTE_FLIP_MASK = %ymm9

# 1st arg; saved to the stack and later reloaded into %r12
CTX1 = %rdi
CTX2 = %r12
# 2nd arg
INP = %rsi
# 3rd arg
NUM_BLKS = %rdx

c  = %rcx
d  = %r8
e  = %rdx
y3 = %rsi

TBL = %rdi		# clobbers CTX1

a = %rax
b = %rbx

f = %r9
g = %r10
h = %r11
old_h = %r11

T1 = %r12		# clobbers CTX2
y0 = %r13
y1 = %r14
y2 = %r15

# Stack frame layout
XFER_SIZE    = 4*8
SRND_SIZE    = 1*8
INP_SIZE     = 1*8
INPEND_SIZE  = 1*8
CTX_SIZE     = 1*8
RSPSAVE_SIZE = 1*8
GPRSAVE_SIZE = 5*8

frame_XFER    = 0
frame_SRND    = frame_XFER + XFER_SIZE
frame_INP     = frame_SRND + SRND_SIZE
frame_INPEND  = frame_INP + INP_SIZE
frame_CTX     = frame_INPEND + INPEND_SIZE
frame_RSPSAVE = frame_CTX + CTX_SIZE
frame_GPRSAVE = frame_RSPSAVE + RSPSAVE_SIZE
frame_size    = frame_GPRSAVE + GPRSAVE_SIZE

# Input buffers are assumed to be unaligned
#define VMOVDQ vmovdqu

# addm [mem], reg: reg += [mem]; [mem] = reg
.macro addm p1 p2
        add     \p1, \p2
        mov     \p2, \p1
.endm

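# COPY_YMM_AND_BSWAP ymm, [mem], byte_flip_mask
# Load a ymm register from unaligned memory and byte-swap each qword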
.macro COPY_YMM_AND_BSWAP p1 p2 p3
        VMOVDQ  \p2, \p1
        vpshufb \p3, \p1, \p1
.endm

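# rotate_Ys: rotate the virtual message-schedule registers Y_0..Y_3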
.macro rotate_Ys
        Y_ = Y_0
        Y_0 = Y_1
        Y_1 = Y_2
        Y_2 = Y_3
        Y_3 = Y_
.endm

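# RotateState: rename the working variables a..h so the register that
# held "h" becomes the next round's "a", avoiding any register moves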
.macro RotateState
        old_h = h
        TMP_ = h
        h = g
        g = f
        f = e
        e = d
        d = c
        c = b
        b = a
        a = TMP_
.endm

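# MY_VPALIGNR YDST, YSRC1, YSRC2, RVAL
# YDST = {YSRC1, YSRC2} >> RVAL*8, treating each 128-bit lane pair
# as one contiguous value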
.macro MY_VPALIGNR YDST YSRC1 YSRC2 RVAL
        vperm2f128      $0x3, \YSRC2, \YSRC1, \YDST     # YDST = {YS1_LO, YS2_HI}
        vpalignr        $\RVAL, \YSRC2, \YDST, \YDST    # YDST = {YDST, YS2} >> RVAL*8
.endm
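
# FOUR_ROUNDS_AND_SCHED: execute four SHA-512 rounds while computing the
# next four message-schedule qwords, interleaving vector schedule work
# with the scalar round computation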
.macro FOUR_ROUNDS_AND_SCHED

################################ RND N + 0 ################################

        # Extract w[t-7]
        MY_VPALIGNR     YTMP0, Y_3, Y_2, 8              # YTMP0 = W[-7]
        # Calculate w[t-16] + w[t-7]
        vpaddq          Y_0, YTMP0, YTMP0               # YTMP0 = W[-7] + W[-16]
        # Extract w[t-15]
        MY_VPALIGNR     YTMP1, Y_1, Y_0, 8              # YTMP1 = W[-15]

        # Calculate sigma0

        # Calculate w[t-15] ror 1
        vpsrlq          $1, YTMP1, YTMP2
        vpsllq          $(64-1), YTMP1, YTMP3
        vpor            YTMP2, YTMP3, YTMP3             # YTMP3 = W[-15] ror 1
        # Calculate w[t-15] shr 7
        vpsrlq          $7, YTMP1, YTMP4                # YTMP4 = W[-15] >> 7

        mov     a, y3           # y3 = a
        rorx    $41, e, y0      # y0 = e >> 41
        rorx    $18, e, y1      # y1 = e >> 18
        add     frame_XFER(%rsp), h     # h = k + w + h
        or      c, y3           # y3 = a|c
        mov     f, y2           # y2 = f
        rorx    $34, a, T1      # T1 = a >> 34

        xor     y1, y0          # y0 = (e>>41) ^ (e>>18)
        xor     g, y2           # y2 = f^g
        rorx    $14, e, y1      # y1 = e >> 14

        and     e, y2           # y2 = (f^g)&e
        xor     y1, y0          # y0 = Sigma1(e)
        rorx    $39, a, y1      # y1 = a >> 39
        add     h, d            # d = k + w + h + d

        and     b, y3           # y3 = (a|c)&b
        xor     T1, y1          # y1 = (a>>39) ^ (a>>34)
        rorx    $28, a, T1      # T1 = a >> 28

        xor     g, y2           # y2 = CH = ((f^g)&e)^g
        xor     T1, y1          # y1 = Sigma0(a)
        mov     a, T1           # T1 = a
        and     c, T1           # T1 = a&c

        add     y0, y2          # y2 = Sigma1 + CH
        or      T1, y3          # y3 = MAJ = ((a|c)&b)|(a&c)
        add     y1, h           # h = k + w + h + Sigma0

        add     y2, d           # d = d + t1

        add     y2, h           # h = t1 + Sigma0
        add     y3, h           # h = t1 + Sigma0 + MAJ

        RotateState

################################ RND N + 1 ################################

        # Calculate w[t-15] ror 8
        vpsrlq          $8, YTMP1, YTMP2
        vpsllq          $(64-8), YTMP1, YTMP1
        vpor            YTMP2, YTMP1, YTMP1             # YTMP1 = W[-15] ror 8
        # XOR the three components
        vpxor           YTMP4, YTMP3, YTMP3             # YTMP3 = (W[-15] ror 1) ^ (W[-15] >> 7)
        vpxor           YTMP1, YTMP3, YTMP1             # YTMP1 = s0 = sigma0(W[-15])

        # Add three components: w[t-16], w[t-7] and sigma0
        vpaddq          YTMP1, YTMP0, YTMP0             # YTMP0 = W[-16] + W[-7] + s0
        # Move to appropriate lanes for calculating w[16] and w[17]
        vperm2f128      $0x0, YTMP0, YTMP0, Y_0         # Y_0 = W[-16] + W[-7] + s0 {BABA}
        # Move to appropriate lanes for calculating w[18] and w[19]
        vpand           MASK_YMM_LO(%rip), YTMP0, YTMP0 # YTMP0 = W[-16] + W[-7] + s0 {DC00}

        # Calculate sigma1 for w[16] and w[17] on both 128-bit lanes
        vperm2f128      $0x11, Y_3, Y_3, YTMP2          # YTMP2 = W[-2] {BABA}
        vpsrlq          $6, YTMP2, YTMP4                # YTMP4 = W[-2] >> 6 {BABA}

        mov     a, y3
        rorx    $41, e, y0
        rorx    $18, e, y1
        add     1*8+frame_XFER(%rsp), h         # h = k + w + h
        or      c, y3

        mov     f, y2
        rorx    $34, a, T1
        xor     y1, y0
        xor     g, y2

        rorx    $14, e, y1
        xor     y1, y0                          # y0 = Sigma1(e)
        rorx    $39, a, y1
        and     e, y2
        add     h, d

        and     b, y3
        xor     T1, y1

        rorx    $28, a, T1
        xor     g, y2                           # y2 = CH

        xor     T1, y1                          # y1 = Sigma0(a)
        mov     a, T1
        and     c, T1
        add     y0, y2                          # y2 = Sigma1 + CH

        or      T1, y3                          # y3 = MAJ
        add     y1, h

        add     y2, d
        add     y2, h
        add     y3, h

        RotateState

################################ RND N + 2 ################################

        # Calculate sigma1 = (W[-2] ror 19) ^ (W[-2] ror 61) ^ (W[-2] >> 6)
        vpsrlq          $19, YTMP2, YTMP3
        vpsllq          $(64-19), YTMP2, YTMP1
        vpor            YTMP1, YTMP3, YTMP3             # YTMP3 = W[-2] ror 19 {BABA}
        vpxor           YTMP3, YTMP4, YTMP4
        vpsrlq          $61, YTMP2, YTMP3
        vpsllq          $(64-61), YTMP2, YTMP1
        vpor            YTMP1, YTMP3, YTMP3             # YTMP3 = W[-2] ror 61 {BABA}
        vpxor           YTMP3, YTMP4, YTMP4             # YTMP4 = s1 = sigma1(W[-2]) {BABA}

        # Add the sigma1 component to get w[16] and w[17]
        vpaddq          YTMP4, Y_0, Y_0                 # Y_0 = {W[1], W[0], W[1], W[0]}

        # Start calculating sigma1 for w[18] and w[19]
        vpsrlq          $6, Y_0, YTMP4                  # YTMP4 = W[-2] >> 6 {DC--}

        mov     a, y3
        rorx    $41, e, y0
        add     2*8+frame_XFER(%rsp), h         # h = k + w + h

        rorx    $18, e, y1
        or      c, y3
        mov     f, y2
        xor     g, y2

        rorx    $34, a, T1
        xor     y1, y0
        and     e, y2

        rorx    $14, e, y1
        add     h, d
        and     b, y3

        xor     y1, y0                          # y0 = Sigma1(e)
        rorx    $39, a, y1
        xor     g, y2                           # y2 = CH

        xor     T1, y1
        rorx    $28, a, T1

        xor     T1, y1                          # y1 = Sigma0(a)
        mov     a, T1
        and     c, T1
        add     y0, y2                          # y2 = Sigma1 + CH

        or      T1, y3                          # y3 = MAJ
        add     y1, h
        add     y2, d
        add     y2, h

        add     y3, h

        RotateState

################################ RND N + 3 ################################

        # Finish sigma1 for w[18] and w[19]
        vpsrlq          $19, Y_0, YTMP3
        vpsllq          $(64-19), Y_0, YTMP1
        vpor            YTMP1, YTMP3, YTMP3             # YTMP3 = W[-2] ror 19 {DC--}
        vpxor           YTMP3, YTMP4, YTMP4
        vpsrlq          $61, Y_0, YTMP3
        vpsllq          $(64-61), Y_0, YTMP1
        vpor            YTMP1, YTMP3, YTMP3             # YTMP3 = W[-2] ror 61 {DC--}
        vpxor           YTMP3, YTMP4, YTMP4             # YTMP4 = s1 = sigma1(W[-2]) {DC--}

        # Add the sigma1 component to get w[18] and w[19]
        vpaddq          YTMP4, YTMP0, YTMP2             # YTMP2 = {W[3], W[2], --, --}

        # Form {w[19], w[18], w[17], w[16]}
        vpblendd        $0xF0, YTMP2, Y_0, Y_0          # Y_0 = {W[3], W[2], W[1], W[0]}

        mov     a, y3
        rorx    $41, e, y0
        rorx    $18, e, y1
        add     3*8+frame_XFER(%rsp), h         # h = k + w + h
        or      c, y3

        mov     f, y2
        rorx    $34, a, T1
        xor     y1, y0
        xor     g, y2

        rorx    $14, e, y1
        and     e, y2
        add     h, d
        and     b, y3

        xor     y1, y0                          # y0 = Sigma1(e)
        xor     g, y2                           # y2 = CH

        rorx    $39, a, y1
        add     y0, y2                          # y2 = Sigma1 + CH

        xor     T1, y1
        add     y2, d

        rorx    $28, a, T1

        xor     T1, y1                          # y1 = Sigma0(a)
        mov     a, T1
        and     c, T1
        or      T1, y3                          # y3 = MAJ

        add     y1, h
        add     y2, h
        add     y3, h

        RotateState

        rotate_Ys
.endm

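# DO_4ROUNDS: four SHA-512 rounds using the precomputed w+K values staged
# in the frame_XFER stack area; no new schedule qwords are generated, so
# this is used for the final 16 rounds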
.macro DO_4ROUNDS

################################ RND N + 0 ################################

        mov     f, y2           # y2 = f
        rorx    $41, e, y0      # y0 = e >> 41
        rorx    $18, e, y1      # y1 = e >> 18
        xor     g, y2           # y2 = f^g

        xor     y1, y0          # y0 = (e>>41) ^ (e>>18)
        rorx    $14, e, y1      # y1 = e >> 14
        and     e, y2           # y2 = (f^g)&e

        xor     y1, y0          # y0 = Sigma1(e)
        rorx    $34, a, T1      # T1 = a >> 34
        xor     g, y2           # y2 = CH = ((f^g)&e)^g
        rorx    $39, a, y1      # y1 = a >> 39
        mov     a, y3           # y3 = a

        xor     T1, y1          # y1 = (a>>39) ^ (a>>34)
        rorx    $28, a, T1      # T1 = a >> 28
        add     frame_XFER(%rsp), h     # h = k + w + h
        or      c, y3           # y3 = a|c

        xor     T1, y1          # y1 = Sigma0(a)
        mov     a, T1           # T1 = a
        and     b, y3           # y3 = (a|c)&b
        and     c, T1           # T1 = a&c
        add     y0, y2          # y2 = Sigma1 + CH

        add     h, d            # d = k + w + h + d
        or      T1, y3          # y3 = MAJ = ((a|c)&b)|(a&c)
        add     y1, h           # h = k + w + h + Sigma0

        add     y2, d           # d = d + t1

        RotateState

################################ RND N + 1 ################################

        add     y2, old_h       # finish previous round: h = t1 + Sigma0
        mov     f, y2
        rorx    $41, e, y0
        rorx    $18, e, y1
        xor     g, y2

        xor     y1, y0
        rorx    $14, e, y1
        and     e, y2
        add     y3, old_h       # finish previous round: h += MAJ

        xor     y1, y0                          # y0 = Sigma1(e)
        rorx    $34, a, T1
        xor     g, y2                           # y2 = CH
        rorx    $39, a, y1
        mov     a, y3

        xor     T1, y1
        rorx    $28, a, T1
        add     8*1+frame_XFER(%rsp), h         # h = k + w + h
        or      c, y3

        xor     T1, y1                          # y1 = Sigma0(a)
        mov     a, T1
        and     b, y3
        and     c, T1
        add     y0, y2                          # y2 = Sigma1 + CH

        add     h, d
        or      T1, y3                          # y3 = MAJ
        add     y1, h

        add     y2, d

        RotateState

################################ RND N + 2 ################################

        add     y2, old_h       # finish previous round: h = t1 + Sigma0
        mov     f, y2
        rorx    $41, e, y0
        rorx    $18, e, y1
        xor     g, y2

        xor     y1, y0
        rorx    $14, e, y1
        and     e, y2
        add     y3, old_h       # finish previous round: h += MAJ

        xor     y1, y0                          # y0 = Sigma1(e)
        rorx    $34, a, T1
        xor     g, y2                           # y2 = CH
        rorx    $39, a, y1
        mov     a, y3

        xor     T1, y1
        rorx    $28, a, T1
        add     8*2+frame_XFER(%rsp), h         # h = k + w + h
        or      c, y3

        xor     T1, y1                          # y1 = Sigma0(a)
        mov     a, T1
        and     b, y3
        and     c, T1
        add     y0, y2                          # y2 = Sigma1 + CH

        add     h, d
        or      T1, y3                          # y3 = MAJ
        add     y1, h

        add     y2, d

        RotateState

################################ RND N + 3 ################################

        add     y2, old_h       # finish previous round: h = t1 + Sigma0
        mov     f, y2
        rorx    $41, e, y0
        rorx    $18, e, y1
        xor     g, y2

        xor     y1, y0
        rorx    $14, e, y1
        and     e, y2
        add     y3, old_h       # finish previous round: h += MAJ

        xor     y1, y0                          # y0 = Sigma1(e)
        rorx    $34, a, T1
        xor     g, y2                           # y2 = CH
        rorx    $39, a, y1
        mov     a, y3

        xor     T1, y1
        rorx    $28, a, T1
        add     8*3+frame_XFER(%rsp), h         # h = k + w + h
        or      c, y3

        xor     T1, y1                          # y1 = Sigma0(a)
        mov     a, T1
        and     b, y3
        and     c, T1
        add     y0, y2                          # y2 = Sigma1 + CH

        add     h, d
        or      T1, y3                          # y3 = MAJ
        add     y1, h

        add     y2, d

        add     y2, h           # last round of the group: finish h now

        add     y3, h

        RotateState

.endm
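
########################################################################
# sha512_transform_rorx
# Arguments: %rdi = digest state (eight u64 words, updated in place)
#            %rsi = message data
#            %rdx = number of 128-byte message blocks to process
########################################################################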
ENTRY(sha512_transform_rorx)
        # Allocate stack space; align %rsp to a 32-byte boundary
        mov     %rsp, %rax
        sub     $frame_size, %rsp
        and     $~(0x20 - 1), %rsp
        mov     %rax, frame_RSPSAVE(%rsp)

        # Save callee-saved GPRs
        mov     %rbx, 8*0+frame_GPRSAVE(%rsp)
        mov     %r12, 8*1+frame_GPRSAVE(%rsp)
        mov     %r13, 8*2+frame_GPRSAVE(%rsp)
        mov     %r14, 8*3+frame_GPRSAVE(%rsp)
        mov     %r15, 8*4+frame_GPRSAVE(%rsp)

        shl     $7, NUM_BLKS    # convert blocks to bytes
        jz      done_hash
        add     INP, NUM_BLKS   # pointer to end of data
        mov     NUM_BLKS, frame_INPEND(%rsp)

        # Load initial digest
        mov     8*0(CTX1), a
        mov     8*1(CTX1), b
        mov     8*2(CTX1), c
        mov     8*3(CTX1), d
        mov     8*4(CTX1), e
        mov     8*5(CTX1), f
        mov     8*6(CTX1), g
        mov     8*7(CTX1), h

        # Save %rdi (CTX) before it gets clobbered by TBL
        mov     %rdi, frame_CTX(%rsp)

        vmovdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK

loop0:
        lea     K512(%rip), TBL

        # Byte-swap the first 16 qwords of the message block
        COPY_YMM_AND_BSWAP      Y_0, (INP), BYTE_FLIP_MASK
        COPY_YMM_AND_BSWAP      Y_1, 1*32(INP), BYTE_FLIP_MASK
        COPY_YMM_AND_BSWAP      Y_2, 2*32(INP), BYTE_FLIP_MASK
        COPY_YMM_AND_BSWAP      Y_3, 3*32(INP), BYTE_FLIP_MASK

        mov     INP, frame_INP(%rsp)

        # Rounds 0..63: 4 iterations of 16 rounds each, with scheduling
        movq    $4, frame_SRND(%rsp)

.align 16
loop1:
        # Each FOUR_ROUNDS_AND_SCHED consumes one ymm of w+K values
        # staged in the frame_XFER area
        vpaddq  (TBL), Y_0, XFER
        vmovdqa XFER, frame_XFER(%rsp)
        FOUR_ROUNDS_AND_SCHED

        vpaddq  1*32(TBL), Y_0, XFER
        vmovdqa XFER, frame_XFER(%rsp)
        FOUR_ROUNDS_AND_SCHED

        vpaddq  2*32(TBL), Y_0, XFER
        vmovdqa XFER, frame_XFER(%rsp)
        FOUR_ROUNDS_AND_SCHED

        vpaddq  3*32(TBL), Y_0, XFER
        vmovdqa XFER, frame_XFER(%rsp)
        add     $(4*32), TBL
        FOUR_ROUNDS_AND_SCHED

        subq    $1, frame_SRND(%rsp)
        jne     loop1

        # Rounds 64..79: no more schedule updates needed
        movq    $2, frame_SRND(%rsp)
loop2:
        vpaddq  (TBL), Y_0, XFER
        vmovdqa XFER, frame_XFER(%rsp)
        DO_4ROUNDS
        vpaddq  1*32(TBL), Y_1, XFER
        vmovdqa XFER, frame_XFER(%rsp)
        add     $(2*32), TBL
        DO_4ROUNDS

        vmovdqa Y_2, Y_0
        vmovdqa Y_3, Y_1

        subq    $1, frame_SRND(%rsp)
        jne     loop2

        # Add this block's digest back into the context
        mov     frame_CTX(%rsp), CTX2
        addm    8*0(CTX2), a
        addm    8*1(CTX2), b
        addm    8*2(CTX2), c
        addm    8*3(CTX2), d
        addm    8*4(CTX2), e
        addm    8*5(CTX2), f
        addm    8*6(CTX2), g
        addm    8*7(CTX2), h

        # Advance to the next 128-byte block, if any
        mov     frame_INP(%rsp), INP
        add     $128, INP
        cmp     frame_INPEND(%rsp), INP
        jne     loop0

done_hash:

        # Restore GPRs
        mov     8*0+frame_GPRSAVE(%rsp), %rbx
        mov     8*1+frame_GPRSAVE(%rsp), %r12
        mov     8*2+frame_GPRSAVE(%rsp), %r13
        mov     8*3+frame_GPRSAVE(%rsp), %r14
        mov     8*4+frame_GPRSAVE(%rsp), %r15

        # Restore stack pointer
        mov     frame_RSPSAVE(%rsp), %rsp
        ret
ENDPROC(sha512_transform_rorx)
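
########################################################################
# Binary data

# Mergeable 640-byte rodata section: lets the linker merge this table
# with an identical 640-byte fragment from another rodata section,
# if one exists.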
.section .rodata.cst640.K512, "aM", @progbits, 640
.align 64
# SHA-512 round constants K[t]
K512:
        .quad   0x428a2f98d728ae22,0x7137449123ef65cd
        .quad   0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
        .quad   0x3956c25bf348b538,0x59f111f1b605d019
        .quad   0x923f82a4af194f9b,0xab1c5ed5da6d8118
        .quad   0xd807aa98a3030242,0x12835b0145706fbe
        .quad   0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
        .quad   0x72be5d74f27b896f,0x80deb1fe3b1696b1
        .quad   0x9bdc06a725c71235,0xc19bf174cf692694
        .quad   0xe49b69c19ef14ad2,0xefbe4786384f25e3
        .quad   0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
        .quad   0x2de92c6f592b0275,0x4a7484aa6ea6e483
        .quad   0x5cb0a9dcbd41fbd4,0x76f988da831153b5
        .quad   0x983e5152ee66dfab,0xa831c66d2db43210
        .quad   0xb00327c898fb213f,0xbf597fc7beef0ee4
        .quad   0xc6e00bf33da88fc2,0xd5a79147930aa725
        .quad   0x06ca6351e003826f,0x142929670a0e6e70
        .quad   0x27b70a8546d22ffc,0x2e1b21385c26c926
        .quad   0x4d2c6dfc5ac42aed,0x53380d139d95b3df
        .quad   0x650a73548baf63de,0x766a0abb3c77b2a8
        .quad   0x81c2c92e47edaee6,0x92722c851482353b
        .quad   0xa2bfe8a14cf10364,0xa81a664bbc423001
        .quad   0xc24b8b70d0f89791,0xc76c51a30654be30
        .quad   0xd192e819d6ef5218,0xd69906245565a910
        .quad   0xf40e35855771202a,0x106aa07032bbd1b8
        .quad   0x19a4c116b8d2d0c8,0x1e376c085141ab53
        .quad   0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
        .quad   0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
        .quad   0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
        .quad   0x748f82ee5defb2fc,0x78a5636f43172f60
        .quad   0x84c87814a1f0ab72,0x8cc702081a6439ec
        .quad   0x90befffa23631e28,0xa4506cebde82bde9
        .quad   0xbef9a3f7b2c67915,0xc67178f2e372532b
        .quad   0xca273eceea26619c,0xd186b8c721c0c207
        .quad   0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
        .quad   0x06f067aa72176fba,0x0a637dc5a2c898a6
        .quad   0x113f9804bef90dae,0x1b710b35131c471b
        .quad   0x28db77f523047d84,0x32caab7b40c72493
        .quad   0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
        .quad   0x4cc5d4becb3e42b6,0x597f299cfc657e2a
        .quad   0x5fcb6fab3ad6faec,0x6c44198c4a475817

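# vpshufb mask that byte-swaps each qword of the (big-endian) input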
.section .rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32
.align 32
PSHUFFLE_BYTE_FLIP_MASK:
        .octa 0x08090a0b0c0d0e0f0001020304050607
        .octa 0x18191a1b1c1d1e1f1011121314151617

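# Mask that zeroes the low 128-bit lane and keeps the high lane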
.section .rodata.cst32.MASK_YMM_LO, "aM", @progbits, 32
.align 32
MASK_YMM_LO:
        .octa 0x00000000000000000000000000000000
        .octa 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF

#endif